file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
printtips.js
//获取打印机数量 function getPrinterCount(LODOP) { return LODOP.GET_PRINTER_COUNT(); }; //获取打印机 function getPrinterName(LODOP,iPrinterNO) { return LODOP.GET_PRINTER_NAME(iPrinterNO); }; /** 小票打印 */ //no,name,vac,price,pay,time,insurance function printtips(data){ if(!data){ layer.msg("参数异常,打印失败",{icon:1}); return false; } var pinstr; if(data.pin=="1"){ pinstr = "第一剂次" } if(data.pin=="2"){ pinstr = "第二剂次" } if(data.pin=="3"){ pinstr = "第三剂次" } if(data.pin=="4"){ pinstr = "第四剂次" } if(data.pin=="5"){ pinstr = "第五剂次" } if(data.impart.group == '17' && data.pin=="1"){ pinstr = "第三剂次" } if(data.impart.group == '17' && data.pin=="2"){ pinstr = "第四剂次" } var birthday = new Date(data.name.birthday.replace(/-/g,"/")); var now = new Date(); var printArgs = new Object(); printArgs["roomCode"] = data.no.substring(0,1); printArgs["no"] = data.no; printArgs["childCode"] = data.name.childcode; printArgs["childName"] = data.name.childname; printArgs["birth"] = data.name.birthday; printArgs["vaccName"] = data.vac; printArgs["companyName"] = data.product.manufacturer; printArgs["batch"] = data.product.batchno; printArgs["price"] = data.price; printArgs["pay"] = data.pay==0?"未付款":data.pay; printArgs["createTime"] = data.time; printArgs["wait"] = data.wait<0?0:data.wait; printArgs["disTitle"] = data.impart.title; printArgs["disPin"] = pinstr; printArgs["disBirth"] = birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日'; printArgs["disInTime"] = now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日'; console.info("当前os版本" + gs.ClientOs()); if(gs.ClientOs().indexOf("Win") < 0){ document.location = "js://printtipview?sign=" + (data.sign?1:0) +"&data="+escape(JSON.stringify(printArgs)); console.info("调用安卓打印,任务结束"); return true; } var html = ""; var hh = 10; if(data.type=='reserve'){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid 
#000;border-bottom:1px dashed #000;position: relative;margin:10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px; margin-top:8px;"><span>编号:</span>' + data.no.substring(0,1) + '</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>姓名:</span>' + data.name.childname + '(' + data.name.birthday + ')</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>疫苗:</span>' + data.vaccName + '</p><p>&nbsp;</p>'+ '</div>'+ '</div>'; hh = 420; }else{ var paradoxicalreaction = data.name.paradoxicalreaction; if(!paradoxicalreaction){ paradoxicalreaction = "无"; } html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'; if(data.no){ hh += 700; html +='<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: relative;margin:10px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<h2 style="text-align: center;margin-top:8px; margin-bottom: 8px;">第#noo#接种室 &nbsp;&nbsp;#no#</h2>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童编号:</span>#childcode#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童姓名:</span>#name#</p>'; if(data.pay==0 && data.price){ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; }else{ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; } html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px; width:160px;"><span>疫苗厂家:</span>#manu#</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 
5px;"><span>疫苗批号:</span>#batch#</p>'; html +='<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗价格:</span>¥#price#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>付费状态:</span>#pay#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>排号时间:</span>#time#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>等待人数:</span>#insurance#</p><hr>' } // '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>保险:</span>#insurance#</p>'+ if(data.impart.title){ hh = hh+ 800; html += '<p style="margin: 0; margin-top:10px; line-height: 22px;font-size: 14x;text-align: center;"><span><strong>' + data.impart.title + '</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 14px;text-align: center;"><span><strong>接种知情告知书回执(' + pinstr + ')</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 15px;"><span>受种者姓名:<span style="text-decoration: underline;">&nbsp;&nbsp;' + data.name.childname + '&nbsp;&nbsp;</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>出生日期:'+birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span><strong>健康状况</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>1.近期是否发热>37.5C、急性传染病。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>2.以往有无过敏史(' + paradoxicalreaction + ')</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>3.有无癫痫病、神经系统疾病史及惊厥史。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>4.是否患有严重慢性疾病。(无)</span></p>'; if(!data.sign){ html += '<p style="margin: 0; line-height: 
20px;font-size: 13px;padding-left: 5px;padding-top: 5px;"><span>家长或监护人签字:<span style="text-decoration: underline;">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } if(da
&& data.impart.choose != 'normal'){ // hh = hh + 200; // var cho = data.impart.choose; // html += cho.replace(new RegExp("#","gm"),"\""); // } hh = hh + 100; html += '<p style="margin: 0; line-height: 20px;font-size: 14px;padding-left: 5px;font-weight: bold;">' + data.vac + '</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;">厂家:' + data.product.manufacturer + '&nbsp;&nbsp;批号:' + data.product.batchno + '</p>'; } html += '</div>'+ '</div>'; html = html.replace("#no#",data.no) .replace("#childcode#",data.name.childcode) .replace("#name#",data.name.childname+'(' + data.name.birthday + ')') .replace("#vac#",data.vac) .replace("#price#",data.price) .replace("#pay#",data.pay==0?"未付款":data.pay) .replace("#time#",data.time) .replace("#insurance#",data.wait<0?0:data.wait); html = html.replace("#noo#",data.no.substring(0,1)).replace("#manu#",data.manu); html = html.replace("#batch#",(data.product.batchno)); } var LODOP; //声明为全局变量 try{ LODOP = getLodop(); } catch(err) { console.error("打印控件出错"); }; if (LODOP == null || LODOP ==''){ return false; } LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("登记台排号小票"); LODOP.ADD_PRINT_HTM(0,0,"100%","120%",html); LODOP.SET_PRINT_PAGESIZE(1,900,2100,""); if(preference.quickOption != 1 && data.pay && data.pay==0 && data.price){ LODOP.ADD_PRINT_BARCODE("30mm","50mm","28mm","28mm","QRCode","C_"+data.no + "_" + data.name.localCode); console.info("C_"+data.no + "_" + data.name.localCode); } if(data.sign && preference.quickOption != 1){ LODOP.ADD_PRINT_IMAGE("142mm","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); }else if(data.sign && preference.quickOption == 1){ LODOP.ADD_PRINT_IMAGE((hh-800)+"px","15mm",180,67.5,"data:image/png;base64,"+data.sign); 
LODOP.SET_PRINT_STYLEA(0,"Stretch",2); } var hasPrint = false; for(var i = 0; i < getPrinterCount(LODOP); i ++){ if("XP-80C" == getPrinterName(LODOP,i)){ console.info("找到打印机XP-80C-->"+i); hasPrint = true; LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } } /* 打印关注二维码 */ if(hasPrint && data.nextTime && data.nextVacc){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:0px dashed #000;position: relative;margin:15px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 18px;"><span>宝宝姓名:</span>' + data.name.childname + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种日期:</span>' + data.nextTime + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>预&ensp;约&ensp;时&ensp;间&nbsp;:</span>' + data.selectTime + '</p>'; html+='<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种疫苗:</span><br/>&nbsp;&nbsp;&nbsp;&nbsp;' + data.nextVacc + '</p></div></div>'; html2 ='<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:0px;border-bottom:1px dashed #000;position: relative;margin:0 10px 10px 10px;">'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>扫码关注公众号,即可随时接受宝宝接种疫苗提醒、了解最新疫苗咨询</span></p></div></div>'; LODOP=getLodop(); LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("微信关注二维码"); LODOP.ADD_PRINT_HTM("-10",0,"100%","100%",html); 
LODOP.ADD_PRINT_BARCODE("40mm","26mm","30mm","30mm","QRCode","http://www.chinavacc.cn/wpwx/child/attenT.do?id=" + data.name.id); LODOP.ADD_PRINT_HTM("60mm",0,"100%","100%",html2); LODOP.SET_PRINT_PAGESIZE(3,800,"",""); LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } if(!hasPrint){ console.error("未找到小票打印机XP-80C,打印小票失败"); layer.msg("未找到小票打印机XP-80C,打印小票失败",{icon:1,offsetTop: 500}); } return true; }
ta.sign){ hh = hh + 200; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 5px;"><span>家长或监护人签字:<span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; margin-top:80px;"><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } // if(data.impart.choose
conditional_block
printtips.js
//获取打印机数量 function getPrinterCoun
turn LODOP.GET_PRINTER_COUNT(); }; //获取打印机 function getPrinterName(LODOP,iPrinterNO) { return LODOP.GET_PRINTER_NAME(iPrinterNO); }; /** 小票打印 */ //no,name,vac,price,pay,time,insurance function printtips(data){ if(!data){ layer.msg("参数异常,打印失败",{icon:1}); return false; } var pinstr; if(data.pin=="1"){ pinstr = "第一剂次" } if(data.pin=="2"){ pinstr = "第二剂次" } if(data.pin=="3"){ pinstr = "第三剂次" } if(data.pin=="4"){ pinstr = "第四剂次" } if(data.pin=="5"){ pinstr = "第五剂次" } if(data.impart.group == '17' && data.pin=="1"){ pinstr = "第三剂次" } if(data.impart.group == '17' && data.pin=="2"){ pinstr = "第四剂次" } var birthday = new Date(data.name.birthday.replace(/-/g,"/")); var now = new Date(); var printArgs = new Object(); printArgs["roomCode"] = data.no.substring(0,1); printArgs["no"] = data.no; printArgs["childCode"] = data.name.childcode; printArgs["childName"] = data.name.childname; printArgs["birth"] = data.name.birthday; printArgs["vaccName"] = data.vac; printArgs["companyName"] = data.product.manufacturer; printArgs["batch"] = data.product.batchno; printArgs["price"] = data.price; printArgs["pay"] = data.pay==0?"未付款":data.pay; printArgs["createTime"] = data.time; printArgs["wait"] = data.wait<0?0:data.wait; printArgs["disTitle"] = data.impart.title; printArgs["disPin"] = pinstr; printArgs["disBirth"] = birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日'; printArgs["disInTime"] = now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日'; console.info("当前os版本" + gs.ClientOs()); if(gs.ClientOs().indexOf("Win") < 0){ document.location = "js://printtipview?sign=" + (data.sign?1:0) +"&data="+escape(JSON.stringify(printArgs)); console.info("调用安卓打印,任务结束"); return true; } var html = ""; var hh = 10; if(data.type=='reserve'){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: 
relative;margin:10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px; margin-top:8px;"><span>编号:</span>' + data.no.substring(0,1) + '</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>姓名:</span>' + data.name.childname + '(' + data.name.birthday + ')</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>疫苗:</span>' + data.vaccName + '</p><p>&nbsp;</p>'+ '</div>'+ '</div>'; hh = 420; }else{ var paradoxicalreaction = data.name.paradoxicalreaction; if(!paradoxicalreaction){ paradoxicalreaction = "无"; } html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'; if(data.no){ hh += 700; html +='<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: relative;margin:10px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<h2 style="text-align: center;margin-top:8px; margin-bottom: 8px;">第#noo#接种室 &nbsp;&nbsp;#no#</h2>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童编号:</span>#childcode#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童姓名:</span>#name#</p>'; if(data.pay==0 && data.price){ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; }else{ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; } html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px; width:160px;"><span>疫苗厂家:</span>#manu#</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗批号:</span>#batch#</p>'; html 
+='<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗价格:</span>¥#price#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>付费状态:</span>#pay#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>排号时间:</span>#time#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>等待人数:</span>#insurance#</p><hr>' } // '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>保险:</span>#insurance#</p>'+ if(data.impart.title){ hh = hh+ 800; html += '<p style="margin: 0; margin-top:10px; line-height: 22px;font-size: 14x;text-align: center;"><span><strong>' + data.impart.title + '</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 14px;text-align: center;"><span><strong>接种知情告知书回执(' + pinstr + ')</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 15px;"><span>受种者姓名:<span style="text-decoration: underline;">&nbsp;&nbsp;' + data.name.childname + '&nbsp;&nbsp;</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>出生日期:'+birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span><strong>健康状况</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>1.近期是否发热>37.5C、急性传染病。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>2.以往有无过敏史(' + paradoxicalreaction + ')</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>3.有无癫痫病、神经系统疾病史及惊厥史。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>4.是否患有严重慢性疾病。(无)</span></p>'; if(!data.sign){ html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 
5px;padding-top: 5px;"><span>家长或监护人签字:<span style="text-decoration: underline;">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } if(data.sign){ hh = hh + 200; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 5px;"><span>家长或监护人签字:<span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; margin-top:80px;"><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } // if(data.impart.choose && data.impart.choose != 'normal'){ // hh = hh + 200; // var cho = data.impart.choose; // html += cho.replace(new RegExp("#","gm"),"\""); // } hh = hh + 100; html += '<p style="margin: 0; line-height: 20px;font-size: 14px;padding-left: 5px;font-weight: bold;">' + data.vac + '</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;">厂家:' + data.product.manufacturer + '&nbsp;&nbsp;批号:' + data.product.batchno + '</p>'; } html += '</div>'+ '</div>'; html = html.replace("#no#",data.no) .replace("#childcode#",data.name.childcode) .replace("#name#",data.name.childname+'(' + data.name.birthday + ')') .replace("#vac#",data.vac) .replace("#price#",data.price) .replace("#pay#",data.pay==0?"未付款":data.pay) .replace("#time#",data.time) .replace("#insurance#",data.wait<0?0:data.wait); html = html.replace("#noo#",data.no.substring(0,1)).replace("#manu#",data.manu); html = html.replace("#batch#",(data.product.batchno)); } var LODOP; //声明为全局变量 try{ LODOP = getLodop(); } catch(err) { console.error("打印控件出错"); }; if (LODOP == null || LODOP ==''){ 
return false; } LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("登记台排号小票"); LODOP.ADD_PRINT_HTM(0,0,"100%","120%",html); LODOP.SET_PRINT_PAGESIZE(1,900,2100,""); if(preference.quickOption != 1 && data.pay && data.pay==0 && data.price){ LODOP.ADD_PRINT_BARCODE("30mm","50mm","28mm","28mm","QRCode","C_"+data.no + "_" + data.name.localCode); console.info("C_"+data.no + "_" + data.name.localCode); } if(data.sign && preference.quickOption != 1){ LODOP.ADD_PRINT_IMAGE("142mm","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); }else if(data.sign && preference.quickOption == 1){ LODOP.ADD_PRINT_IMAGE((hh-800)+"px","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); } var hasPrint = false; for(var i = 0; i < getPrinterCount(LODOP); i ++){ if("XP-80C" == getPrinterName(LODOP,i)){ console.info("找到打印机XP-80C-->"+i); hasPrint = true; LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } } /* 打印关注二维码 */ if(hasPrint && data.nextTime && data.nextVacc){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:0px dashed #000;position: relative;margin:15px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 18px;"><span>宝宝姓名:</span>' + data.name.childname + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种日期:</span>' + data.nextTime + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; 
"><span>预&ensp;约&ensp;时&ensp;间&nbsp;:</span>' + data.selectTime + '</p>'; html+='<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种疫苗:</span><br/>&nbsp;&nbsp;&nbsp;&nbsp;' + data.nextVacc + '</p></div></div>'; html2 ='<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:0px;border-bottom:1px dashed #000;position: relative;margin:0 10px 10px 10px;">'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>扫码关注公众号,即可随时接受宝宝接种疫苗提醒、了解最新疫苗咨询</span></p></div></div>'; LODOP=getLodop(); LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("微信关注二维码"); LODOP.ADD_PRINT_HTM("-10",0,"100%","100%",html); LODOP.ADD_PRINT_BARCODE("40mm","26mm","30mm","30mm","QRCode","http://www.chinavacc.cn/wpwx/child/attenT.do?id=" + data.name.id); LODOP.ADD_PRINT_HTM("60mm",0,"100%","100%",html2); LODOP.SET_PRINT_PAGESIZE(3,800,"",""); LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } if(!hasPrint){ console.error("未找到小票打印机XP-80C,打印小票失败"); layer.msg("未找到小票打印机XP-80C,打印小票失败",{icon:1,offsetTop: 500}); } return true; }
t(LODOP) { re
identifier_name
printtips.js
//获取打印机数量 function getPrinterCount(LODOP) { return LODOP.GET_PRINTER_COUNT(); }; //获取打印机 function getPrinterName(LODOP,iPrinterNO) { return LODOP.GET_PRI
,vac,price,pay,time,insurance function printtips(data){ if(!data){ layer.msg("参数异常,打印失败",{icon:1}); return false; } var pinstr; if(data.pin=="1"){ pinstr = "第一剂次" } if(data.pin=="2"){ pinstr = "第二剂次" } if(data.pin=="3"){ pinstr = "第三剂次" } if(data.pin=="4"){ pinstr = "第四剂次" } if(data.pin=="5"){ pinstr = "第五剂次" } if(data.impart.group == '17' && data.pin=="1"){ pinstr = "第三剂次" } if(data.impart.group == '17' && data.pin=="2"){ pinstr = "第四剂次" } var birthday = new Date(data.name.birthday.replace(/-/g,"/")); var now = new Date(); var printArgs = new Object(); printArgs["roomCode"] = data.no.substring(0,1); printArgs["no"] = data.no; printArgs["childCode"] = data.name.childcode; printArgs["childName"] = data.name.childname; printArgs["birth"] = data.name.birthday; printArgs["vaccName"] = data.vac; printArgs["companyName"] = data.product.manufacturer; printArgs["batch"] = data.product.batchno; printArgs["price"] = data.price; printArgs["pay"] = data.pay==0?"未付款":data.pay; printArgs["createTime"] = data.time; printArgs["wait"] = data.wait<0?0:data.wait; printArgs["disTitle"] = data.impart.title; printArgs["disPin"] = pinstr; printArgs["disBirth"] = birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日'; printArgs["disInTime"] = now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日'; console.info("当前os版本" + gs.ClientOs()); if(gs.ClientOs().indexOf("Win") < 0){ document.location = "js://printtipview?sign=" + (data.sign?1:0) +"&data="+escape(JSON.stringify(printArgs)); console.info("调用安卓打印,任务结束"); return true; } var html = ""; var hh = 10; if(data.type=='reserve'){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: relative;margin:10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ 
'<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px; margin-top:8px;"><span>编号:</span>' + data.no.substring(0,1) + '</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>姓名:</span>' + data.name.childname + '(' + data.name.birthday + ')</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>疫苗:</span>' + data.vaccName + '</p><p>&nbsp;</p>'+ '</div>'+ '</div>'; hh = 420; }else{ var paradoxicalreaction = data.name.paradoxicalreaction; if(!paradoxicalreaction){ paradoxicalreaction = "无"; } html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'; if(data.no){ hh += 700; html +='<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: relative;margin:10px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<h2 style="text-align: center;margin-top:8px; margin-bottom: 8px;">第#noo#接种室 &nbsp;&nbsp;#no#</h2>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童编号:</span>#childcode#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童姓名:</span>#name#</p>'; if(data.pay==0 && data.price){ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; }else{ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; } html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px; width:160px;"><span>疫苗厂家:</span>#manu#</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗批号:</span>#batch#</p>'; html +='<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗价格:</span>¥#price#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 
12px;padding-left: 5px;"><span>付费状态:</span>#pay#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>排号时间:</span>#time#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>等待人数:</span>#insurance#</p><hr>' } // '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>保险:</span>#insurance#</p>'+ if(data.impart.title){ hh = hh+ 800; html += '<p style="margin: 0; margin-top:10px; line-height: 22px;font-size: 14x;text-align: center;"><span><strong>' + data.impart.title + '</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 14px;text-align: center;"><span><strong>接种知情告知书回执(' + pinstr + ')</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 15px;"><span>受种者姓名:<span style="text-decoration: underline;">&nbsp;&nbsp;' + data.name.childname + '&nbsp;&nbsp;</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>出生日期:'+birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span><strong>健康状况</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>1.近期是否发热>37.5C、急性传染病。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>2.以往有无过敏史(' + paradoxicalreaction + ')</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>3.有无癫痫病、神经系统疾病史及惊厥史。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>4.是否患有严重慢性疾病。(无)</span></p>'; if(!data.sign){ html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;padding-top: 5px;"><span>家长或监护人签字:<span style="text-decoration: 
underline;">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } if(data.sign){ hh = hh + 200; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 5px;"><span>家长或监护人签字:<span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; margin-top:80px;"><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } // if(data.impart.choose && data.impart.choose != 'normal'){ // hh = hh + 200; // var cho = data.impart.choose; // html += cho.replace(new RegExp("#","gm"),"\""); // } hh = hh + 100; html += '<p style="margin: 0; line-height: 20px;font-size: 14px;padding-left: 5px;font-weight: bold;">' + data.vac + '</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;">厂家:' + data.product.manufacturer + '&nbsp;&nbsp;批号:' + data.product.batchno + '</p>'; } html += '</div>'+ '</div>'; html = html.replace("#no#",data.no) .replace("#childcode#",data.name.childcode) .replace("#name#",data.name.childname+'(' + data.name.birthday + ')') .replace("#vac#",data.vac) .replace("#price#",data.price) .replace("#pay#",data.pay==0?"未付款":data.pay) .replace("#time#",data.time) .replace("#insurance#",data.wait<0?0:data.wait); html = html.replace("#noo#",data.no.substring(0,1)).replace("#manu#",data.manu); html = html.replace("#batch#",(data.product.batchno)); } var LODOP; //声明为全局变量 try{ LODOP = getLodop(); } catch(err) { console.error("打印控件出错"); }; if (LODOP == null || LODOP ==''){ return false; } 
LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("登记台排号小票"); LODOP.ADD_PRINT_HTM(0,0,"100%","120%",html); LODOP.SET_PRINT_PAGESIZE(1,900,2100,""); if(preference.quickOption != 1 && data.pay && data.pay==0 && data.price){ LODOP.ADD_PRINT_BARCODE("30mm","50mm","28mm","28mm","QRCode","C_"+data.no + "_" + data.name.localCode); console.info("C_"+data.no + "_" + data.name.localCode); } if(data.sign && preference.quickOption != 1){ LODOP.ADD_PRINT_IMAGE("142mm","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); }else if(data.sign && preference.quickOption == 1){ LODOP.ADD_PRINT_IMAGE((hh-800)+"px","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); } var hasPrint = false; for(var i = 0; i < getPrinterCount(LODOP); i ++){ if("XP-80C" == getPrinterName(LODOP,i)){ console.info("找到打印机XP-80C-->"+i); hasPrint = true; LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } } /* 打印关注二维码 */ if(hasPrint && data.nextTime && data.nextVacc){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:0px dashed #000;position: relative;margin:15px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 18px;"><span>宝宝姓名:</span>' + data.name.childname + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种日期:</span>' + data.nextTime + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; 
"><span>预&ensp;约&ensp;时&ensp;间&nbsp;:</span>' + data.selectTime + '</p>'; html+='<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种疫苗:</span><br/>&nbsp;&nbsp;&nbsp;&nbsp;' + data.nextVacc + '</p></div></div>'; html2 ='<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:0px;border-bottom:1px dashed #000;position: relative;margin:0 10px 10px 10px;">'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>扫码关注公众号,即可随时接受宝宝接种疫苗提醒、了解最新疫苗咨询</span></p></div></div>'; LODOP=getLodop(); LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("微信关注二维码"); LODOP.ADD_PRINT_HTM("-10",0,"100%","100%",html); LODOP.ADD_PRINT_BARCODE("40mm","26mm","30mm","30mm","QRCode","http://www.chinavacc.cn/wpwx/child/attenT.do?id=" + data.name.id); LODOP.ADD_PRINT_HTM("60mm",0,"100%","100%",html2); LODOP.SET_PRINT_PAGESIZE(3,800,"",""); LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } if(!hasPrint){ console.error("未找到小票打印机XP-80C,打印小票失败"); layer.msg("未找到小票打印机XP-80C,打印小票失败",{icon:1,offsetTop: 500}); } return true; }
NTER_NAME(iPrinterNO); }; /** 小票打印 */ //no,name
identifier_body
printtips.js
//获取打印机数量 function getPrinterCount(LODOP) { return LODOP.GET_PRINTER_COUNT(); }; //获取打印机 function getPrinterName(LODOP,iPrinterNO) { return LODOP.GET_PRINTER_NAME(iPrinterNO); };
layer.msg("参数异常,打印失败",{icon:1}); return false; } var pinstr; if(data.pin=="1"){ pinstr = "第一剂次" } if(data.pin=="2"){ pinstr = "第二剂次" } if(data.pin=="3"){ pinstr = "第三剂次" } if(data.pin=="4"){ pinstr = "第四剂次" } if(data.pin=="5"){ pinstr = "第五剂次" } if(data.impart.group == '17' && data.pin=="1"){ pinstr = "第三剂次" } if(data.impart.group == '17' && data.pin=="2"){ pinstr = "第四剂次" } var birthday = new Date(data.name.birthday.replace(/-/g,"/")); var now = new Date(); var printArgs = new Object(); printArgs["roomCode"] = data.no.substring(0,1); printArgs["no"] = data.no; printArgs["childCode"] = data.name.childcode; printArgs["childName"] = data.name.childname; printArgs["birth"] = data.name.birthday; printArgs["vaccName"] = data.vac; printArgs["companyName"] = data.product.manufacturer; printArgs["batch"] = data.product.batchno; printArgs["price"] = data.price; printArgs["pay"] = data.pay==0?"未付款":data.pay; printArgs["createTime"] = data.time; printArgs["wait"] = data.wait<0?0:data.wait; printArgs["disTitle"] = data.impart.title; printArgs["disPin"] = pinstr; printArgs["disBirth"] = birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日'; printArgs["disInTime"] = now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日'; console.info("当前os版本" + gs.ClientOs()); if(gs.ClientOs().indexOf("Win") < 0){ document.location = "js://printtipview?sign=" + (data.sign?1:0) +"&data="+escape(JSON.stringify(printArgs)); console.info("调用安卓打印,任务结束"); return true; } var html = ""; var hh = 10; if(data.type=='reserve'){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: relative;margin:10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 22px;font-size: 
12px;padding-left: 5px; margin-top:8px;"><span>编号:</span>' + data.no.substring(0,1) + '</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>姓名:</span>' + data.name.childname + '(' + data.name.birthday + ')</p>'+ '<p style="margin: 0; line-height: 22px;font-size: 12px;padding-left: 5px;"><span>疫苗:</span>' + data.vaccName + '</p><p>&nbsp;</p>'+ '</div>'+ '</div>'; hh = 420; }else{ var paradoxicalreaction = data.name.paradoxicalreaction; if(!paradoxicalreaction){ paradoxicalreaction = "无"; } html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'; if(data.no){ hh += 700; html +='<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:1px dashed #000;position: relative;margin:10px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<h2 style="text-align: center;margin-top:8px; margin-bottom: 8px;">第#noo#接种室 &nbsp;&nbsp;#no#</h2>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童编号:</span>#childcode#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>儿童姓名:</span>#name#</p>'; if(data.pay==0 && data.price){ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; }else{ html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗名称:</span>#vac#</p>'; } html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px; width:160px;"><span>疫苗厂家:</span>#manu#</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗批号:</span>#batch#</p>'; html +='<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>疫苗价格:</span>¥#price#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 
5px;"><span>付费状态:</span>#pay#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>排号时间:</span>#time#</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>等待人数:</span>#insurance#</p><hr>' } // '<p style="margin: 0; line-height: 20px;font-size: 12px;padding-left: 5px;"><span>保险:</span>#insurance#</p>'+ if(data.impart.title){ hh = hh+ 800; html += '<p style="margin: 0; margin-top:10px; line-height: 22px;font-size: 14x;text-align: center;"><span><strong>' + data.impart.title + '</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 14px;text-align: center;"><span><strong>接种知情告知书回执(' + pinstr + ')</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 15px;"><span>受种者姓名:<span style="text-decoration: underline;">&nbsp;&nbsp;' + data.name.childname + '&nbsp;&nbsp;</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>出生日期:'+birthday.getFullYear()+ '年' + (birthday.getMonth()+1) + '月' + birthday.getDate() + '日</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span><strong>健康状况</strong></span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>1.近期是否发热>37.5C、急性传染病。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>2.以往有无过敏史(' + paradoxicalreaction + ')</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>3.有无癫痫病、神经系统疾病史及惊厥史。(无)</span></p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;"><span>4.是否患有严重慢性疾病。(无)</span></p>'; if(!data.sign){ html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;padding-top: 5px;"><span>家长或监护人签字:<span style="text-decoration: 
underline;">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } if(data.sign){ hh = hh + 200; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 5px;"><span>家长或监护人签字:<span>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span></span></p>' html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; margin-top:80px;"><span>接种时间:'+now.getFullYear()+ '年' + (now.getMonth()+1) + '月' + now.getDate() + '日</span></p>' } // if(data.impart.choose && data.impart.choose != 'normal'){ // hh = hh + 200; // var cho = data.impart.choose; // html += cho.replace(new RegExp("#","gm"),"\""); // } hh = hh + 100; html += '<p style="margin: 0; line-height: 20px;font-size: 14px;padding-left: 5px;font-weight: bold;">' + data.vac + '</p>'; html += '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px;">厂家:' + data.product.manufacturer + '&nbsp;&nbsp;批号:' + data.product.batchno + '</p>'; } html += '</div>'+ '</div>'; html = html.replace("#no#",data.no) .replace("#childcode#",data.name.childcode) .replace("#name#",data.name.childname+'(' + data.name.birthday + ')') .replace("#vac#",data.vac) .replace("#price#",data.price) .replace("#pay#",data.pay==0?"未付款":data.pay) .replace("#time#",data.time) .replace("#insurance#",data.wait<0?0:data.wait); html = html.replace("#noo#",data.no.substring(0,1)).replace("#manu#",data.manu); html = html.replace("#batch#",(data.product.batchno)); } var LODOP; //声明为全局变量 try{ LODOP = getLodop(); } catch(err) { console.error("打印控件出错"); }; if (LODOP == null || LODOP ==''){ return false; } 
LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("登记台排号小票"); LODOP.ADD_PRINT_HTM(0,0,"100%","120%",html); LODOP.SET_PRINT_PAGESIZE(1,900,2100,""); if(preference.quickOption != 1 && data.pay && data.pay==0 && data.price){ LODOP.ADD_PRINT_BARCODE("30mm","50mm","28mm","28mm","QRCode","C_"+data.no + "_" + data.name.localCode); console.info("C_"+data.no + "_" + data.name.localCode); } if(data.sign && preference.quickOption != 1){ LODOP.ADD_PRINT_IMAGE("142mm","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); }else if(data.sign && preference.quickOption == 1){ LODOP.ADD_PRINT_IMAGE((hh-800)+"px","15mm",180,67.5,"data:image/png;base64,"+data.sign); LODOP.SET_PRINT_STYLEA(0,"Stretch",2); } var hasPrint = false; for(var i = 0; i < getPrinterCount(LODOP); i ++){ if("XP-80C" == getPrinterName(LODOP,i)){ console.info("找到打印机XP-80C-->"+i); hasPrint = true; LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } } /* 打印关注二维码 */ if(hasPrint && data.nextTime && data.nextVacc){ html = '<div style="height: 5px;width: 250px"></div>'+ '<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:1px solid #000;border-bottom:0px dashed #000;position: relative;margin:15px 10px 10px 10px;">'+ '<span style="text-align: center;position: absolute; top: -13px;background: #fff;left: 38%;padding: 0 10px; z-index: 999">智慧接种</span>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; padding-top: 18px;"><span>宝宝姓名:</span>' + data.name.childname + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种日期:</span>' + data.nextTime + '</p>'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; 
"><span>预&ensp;约&ensp;时&ensp;间&nbsp;:</span>' + data.selectTime + '</p>'; html+='<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>下次接种疫苗:</span><br/>&nbsp;&nbsp;&nbsp;&nbsp;' + data.nextVacc + '</p></div></div>'; html2 ='<div style="font-family: 微软雅黑;width: 250px">'+ '<div style="width: 250px;height: auto;border-top:0px;border-bottom:1px dashed #000;position: relative;margin:0 10px 10px 10px;">'+ '<p style="margin: 0; line-height: 20px;font-size: 13px;padding-left: 5px; "><span>扫码关注公众号,即可随时接受宝宝接种疫苗提醒、了解最新疫苗咨询</span></p></div></div>'; LODOP=getLodop(); LODOP.SET_LICENSES("安徽奇兵医学科技有限公司","56E2EB898EE17DEBD030D1E8A683CAFE","安徽奇兵醫學科技有限公司","423D486AF17E2120FEB7B2BDDF66F396"); LODOP.SET_LICENSES("THIRD LICENSE","","AnHui Ace-power Medical and Technology Co., Ltd","709251107F8D9D680D1A81F88BED121F"); LODOP.PRINT_INIT("微信关注二维码"); LODOP.ADD_PRINT_HTM("-10",0,"100%","100%",html); LODOP.ADD_PRINT_BARCODE("40mm","26mm","30mm","30mm","QRCode","http://www.chinavacc.cn/wpwx/child/attenT.do?id=" + data.name.id); LODOP.ADD_PRINT_HTM("60mm",0,"100%","100%",html2); LODOP.SET_PRINT_PAGESIZE(3,800,"",""); LODOP.SET_PRINTER_INDEX("XP-80C"); // LODOP.PREVIEW(); LODOP.PRINT(); } if(!hasPrint){ console.error("未找到小票打印机XP-80C,打印小票失败"); layer.msg("未找到小票打印机XP-80C,打印小票失败",{icon:1,offsetTop: 500}); } return true; }
/** 小票打印 */ //no,name,vac,price,pay,time,insurance function printtips(data){ if(!data){
random_line_split
Rs.js
Ext.define('casco.view.rs.Rs', { extend: 'Ext.grid.Panel', xtype: 'rs', viewModel: 'main', requires: [ 'casco.store.Versions', 'casco.store.Rss', 'casco.view.rs.RsImport', 'casco.view.rs.RsDetails', ], columnLines: true, selModel: { injectCheckbox: 0, mode: "MULTI", //"SINGLE"/"SIMPLE"/"MULTI" checkOnly: true //只能通过checkbox选择 }, selType: "checkboxmodel", initComponent: function() { var me = this; me.versions = new casco.store.Versions(); me.store = new casco.store.Rss(); me.store_rs = new casco.store.Rss(); me.versions.load({ params: { document_id: me.document.id }, synchronous: true, callback: function() { me.down('combobox').select(me.versions.getAt(0)); //取最近的版本 var latest_v = me.versions.getCount() > 0 ? me.versions.getAt(0) : 0; me.curr_version = latest_v; if (latest_v) { me.store_rs.load({ scope: this, synchronous: true, params: { version_id: latest_v.get('id') }, callback: function() { me.columns = me.store_rs.getAt(0).get('columModle'); me.ds = new Ext.data.JsonStore({ data: (me.store_rs.getAt(0).get('data')), fields: (me.store_rs.getAt(0).get('fieldsNames')) }); me.store_rs.setData(me.ds.getData()); me.reconfigure(me.store_rs, me.columns); } }); } } }); me.tbar = [{ xtype: 'combobox', id: 'docv-' + me.document.id, fieldLabel: '版本', labelWidth: 50, store: me.versions, displayField: 'name', valueField: 'id', width: 200, queryMode: 'local', editable: true, lastQuery: '', listeners: { select: function(combo, record) { me.curr_version = record; Ext.Ajax.request({ url: API + 'rs', params: { version_id: record.get('id') }, method: 'get', async: false, callback: function(options, success, response) { me.json = new Ext.util.JSON.decode(response.responseText); } }); me.ds = new Ext.data.JsonStore({ data: me.json.data, fields: me.json.fieldsNames }); me.columns = me.json.columModle; me.store.setData(me.ds.getData()); me.reconfigure(me.store, me.columns); //用columns 对grid panel 重载 }, beforequery: function(e) { e.query = new RegExp(e.query.trim(), 'i'); //去除string两端空格 
e.forceAll = true; } } }, '-', { text: '导入', glyph: 0xf093, scope: this, handler: function() { var win = Ext.create('widget.rs.rsimport', { listeners: { scope: this }, document_id: me.document.id, document: me.document, project: me.project, vstore: me.versions, type: 'rs' }); win.show(); } }, '-', { text: '查看统计', glyph: 0xf080, scope: this, handler: function() { window.open('/stat/cover.htm#' + me.curr_version.get('id')); } }, '-', { text: '版本', glyph: 0xf05a, border: true, width: 110, handler: function() { var win = Ext.create('casco.view.manage.Versions', {'document_id': me.document.id, 'edit': 1}); win.show(); } }, '-', { text: '批量编辑定版', glyph: 0xf05a, border: true, width: 150, handler: function() { var win = Ext.create('casco.view.rs.MultiVats', { 'project': me.project, 'father': me, 'version': me.curr_version }); win.show(); } }, '->', { xtype: 'textfield', fieldLabel: '搜索', labelWidth: 50, name: 'searchField', emptyText: '搜索', hideLabel: true, width: 200, listeners: { change: { fn: me.onTextFieldChange, scope: this, buffer: 500 } } }, { xtype: 'button', text: '&lt;', tooltip: '往前查找', handler: me.onPreviousClick, scope: me }, { xtype: 'button', text: '&gt;', tooltip: '往后查找', handler: me.onNextClick, scope: me }, { xtype: 'checkbox', hideLabel: true, margin: '0 12px 0 0', handler: me.caseSensitiveToggle, scope: me }, ' 区分大小写']; me.bbar = [{ xtype: 'statusbar', defaultText: me.defaultStatusText, name: 'searchStatusBar' }]; var onDelete = function() { var grid = me; Ext.MessageBox.confirm('Confirm delete', 'Are you sure?', function(btn) { if (btn == 'yes') { var view = me.getView(); me.reconfigure(me.store, me.columns); var selection = view.getSelectionModel().getSelection()[0]; var tc = Ext.create('casco.model.Rs', {id: selection.get('id')}); tc.erase(); if (selection) { me.store.remove(selection); selection.erase(); } me.getView().refresh(); } }); }; var onInsertRecord = function() { var tag = ''; me.store.each(function(record) { //取MaxTag+1 if 
(record.get('tag') > tag) tag = record.get('tag'); }, this); var re = /^\[(.*)?\]$/g; if (re.test(tag)) { var suffix = tag.toString().match(/[^\d]+/g); num = parseInt(tag.toString().match(/\d+/g)) + 1; tag = suffix[0] + num + suffix[1]; } else { tag = null; } var win = Ext.create('widget.rs.rsdetails', { status: 0, pointer: me, rs: Ext.create('casco.model.Rs'), columns: me.columns, version_id: me.curr_version.get('id'), tag_id: tag, project: me.project, document_id: me.document.id }); win.show(); }; me.listeners = { itemcontextmenu: function(view, record, item, index, e) { e.stopEvent(); var grid = me; if (!grid.rowCtxMenu) { grid.rowCtxMenu = Ext.create('Ext.menu.Menu', { items: [{ text: '插入记录', handler: onInsertRecord, }, { text: '删除记录', handler: onDelete }] }); }//if grid.selModel.select(record); grid.rowCtxMenu.showAt(e.getXY()); }, destroy: function(thisGrid) { if (thisGrid.rowCtxMenu) { thisGrid.rowCtxMenu.destroy(); } }, celldblclick: function(a, b, c, record) { localStorage.tag = record.get('tag'); console.log('列' + c); if (c == 0 || c == 1) { window.open('/draw/graph2.html#' + record.get('tag') + "&id=" + record.get('id')); return; } var win = Ext.create('widget.rs.rsdetails', { status: 1, rs: record, pointer: me, document_id: me.document_id, project: me.project, columns: me.columns, }); win.down('form').loadRecord(record); win.show(); } }; me.callParent(arguments); }, /* * Live Search Module Cofigures */ bufferedRenderer: false, //searchlive need searchValue: null, //search value initialization indexes: [], //The row indexes where matching strings are found. (used by previous and next buttons) searchRegExp: null, //The generated regular expression used for searching. caseSensitive: false, //Case sensitive mode. regExpMode: false, //Regular expression mode. tagsRe: /<[^>]*>/gm, //detects html tag gm 参数 tagsProtect: '\x0f', //DEL ASCII code matchCls: 'x-livesearch-match', //@cfg {String} matchCls The matched string css classe. 
defaultStatusText: '无匹配结果', afterRender: function() { var me = this; me.callParent(arguments); me.textField = me.down('textfield[name=searchField]'); me.statusBar = me.down('statusbar[name=searchStatusBar]'); }, focusTextField: function(view, td, cellIndex, record, tr, rowIndex, e, eOpts) { if (e.getKey() === e.S) { e.preventDefault(); this.textField.focus(); } }, getSearchValue: function() { var me = this, value = me.textField.getValue(); if (value === '') { return null; } if (!me.regExpMode) { value = Ext.String.escapeRegex(value); } else { try { new RegExp(value); } catch (error) { me.statusBar.setStatus({ text: error.message, iconCls: 'x-status-error' }); return null; } // this is stupid if (value === '^' || value === '$') { return null; } } return value; }, onTextFieldChange: function() { var me = this, count = 0, view =
view.cellSelector, innerSelector = view.innerSelector; view.refresh(); // reset the statusbar me.statusBar.setStatus({ text: me.defaultStatusText, iconCls: '' }); me.searchValue = me.getSearchValue(); me.indexes = []; me.currentIndex = null; if (me.searchValue !== null) { me.searchRegExp = new RegExp(me.getSearchValue(), 'g' + (me.caseSensitive ? '' : 'i')); me.store.each(function(record, idx) { var td = Ext.fly(view.getNode(idx)).down(cellSelector), cell, matches, cellHTML; while (td) { cell = td.down(innerSelector); matches = cell.dom.innerHTML.match(me.tagsRe); cellHTML = cell.dom.innerHTML.replace(me.tagsRe, me.tagsProtect); // populate indexes array, set currentIndex, and replace wrap matched string in a span cellHTML = cellHTML.replace(me.searchRegExp, function(m) { count += 1; if (Ext.Array.indexOf(me.indexes, idx) === -1) { me.indexes.push(idx); } if (me.currentIndex === null) { me.currentIndex = idx; } return '<span class="' + me.matchCls + '">' + m + '</span>'; }); // restore protected tags Ext.each(matches, function(match) { cellHTML = cellHTML.replace(me.tagsProtect, match); }); // update cell html cell.dom.innerHTML = cellHTML; td = td.next(); } }, me); // results found if (me.currentIndex !== null) { // console.log(me.currentIndex); me.getSelectionModel().select(me.currentIndex); // Ext.fly(me.getView().getNode(me.currentIndex)).scrollInteView(); me.getView().focusRow(me.currentIndex); me.statusBar.setStatus({ text: count + ' 处匹配', iconCls: 'x-status-valid' }); } } // no results found if (me.currentIndex === null) { me.getSelectionModel().deselectAll(); } me.textField.focus(); }, onPreviousClick: function() { var me = this, idx; if ((idx = Ext.Array.indexOf(me.indexes, me.currentIndex)) !== -1) { me.currentIndex = me.indexes[idx - 1] || me.indexes[me.indexes.length - 1]; me.getSelectionModel().select(me.currentIndex); me.getView().focusRow(me.currentIndex); } }, onNextClick: function() { var me = this, idx; if ((idx = Ext.Array.indexOf(me.indexes, 
me.currentIndex)) !== -1) { me.currentIndex = me.indexes[idx + 1] || me.indexes[0]; me.getSelectionModel().select(me.currentIndex); me.getView().focusRow(me.currentIndex); } }, caseSensitiveToggle: function(checkbox, checked) { this.caseSensitive = checked; this.onTextFieldChange(); } })
me.view, cellSelector =
conditional_block
Rs.js
Ext.define('casco.view.rs.Rs', { extend: 'Ext.grid.Panel', xtype: 'rs', viewModel: 'main', requires: [ 'casco.store.Versions', 'casco.store.Rss', 'casco.view.rs.RsImport', 'casco.view.rs.RsDetails', ], columnLines: true, selModel: { injectCheckbox: 0, mode: "MULTI", //"SINGLE"/"SIMPLE"/"MULTI" checkOnly: true //只能通过checkbox选择 }, selType: "checkboxmodel", initComponent: function() { var me = this; me.versions = new casco.store.Versions(); me.store = new casco.store.Rss(); me.store_rs = new casco.store.Rss(); me.versions.load({ params: { document_id: me.document.id }, synchronous: true, callback: function() { me.down('combobox').select(me.versions.getAt(0)); //取最近的版本 var latest_v = me.versions.getCount() > 0 ? me.versions.getAt(0) : 0; me.curr_version = latest_v; if (latest_v) { me.store_rs.load({ scope: this, synchronous: true, params: { version_id: latest_v.get('id') }, callback: function() { me.columns = me.store_rs.getAt(0).get('columModle'); me.ds = new Ext.data.JsonStore({ data: (me.store_rs.getAt(0).get('data')), fields: (me.store_rs.getAt(0).get('fieldsNames')) }); me.store_rs.setData(me.ds.getData()); me.reconfigure(me.store_rs, me.columns); } }); } } }); me.tbar = [{ xtype: 'combobox', id: 'docv-' + me.document.id, fieldLabel: '版本', labelWidth: 50, store: me.versions, displayField: 'name', valueField: 'id', width: 200, queryMode: 'local', editable: true, lastQuery: '', listeners: { select: function(combo, record) { me.curr_version = record; Ext.Ajax.request({ url: API + 'rs', params: { version_id: record.get('id') }, method: 'get', async: false, callback: function(options, success, response) { me.json = new Ext.util.JSON.decode(response.responseText); } }); me.ds = new Ext.data.JsonStore({ data: me.json.data, fields: me.json.fieldsNames }); me.columns = me.json.columModle; me.store.setData(me.ds.getData()); me.reconfigure(me.store, me.columns); //用columns 对grid panel 重载 }, beforequery: function(e) { e.query = new RegExp(e.query.trim(), 'i'); //去除string两端空格 
e.forceAll = true; } } }, '-', { text: '导入', glyph: 0xf093, scope: this, handler: function() { var win = Ext.create('widget.rs.rsimport', { listeners: { scope: this }, document_id: me.document.id, document: me.document, project: me.project, vstore: me.versions, type: 'rs' }); win.show(); } }, '-', { text: '查看统计', glyph: 0xf080, scope: this, handler: function() { window.open('/stat/cover.htm#' + me.curr_version.get('id')); } }, '-', { text: '版本', glyph: 0xf05a, border: true, width: 110, handler: function() { var win = Ext.create('casco.view.manage.Versions', {'document_id': me.document.id, 'edit': 1}); win.show(); } }, '-', { text: '批量编辑定版', glyph: 0xf05a, border: true, width: 150, handler: function() { var win = Ext.create('casco.view.rs.MultiVats', { 'project': me.project, 'father': me, 'version': me.curr_version }); win.show(); } }, '->', { xtype: 'textfield', fieldLabel: '搜索', labelWidth: 50, name: 'searchField', emptyText: '搜索', hideLabel: true, width: 200, listeners: { change: { fn: me.onTextFieldChange, scope: this, buffer: 500 } } }, { xtype: 'button', text: '&lt;', tooltip: '往前查找', handler: me.onPreviousClick, scope: me }, { xtype: 'button', text: '&gt;', tooltip: '往后查找', handler: me.onNextClick, scope: me }, { xtype: 'checkbox', hideLabel: true, margin: '0 12px 0 0', handler: me.caseSensitiveToggle, scope: me }, ' 区分大小写']; me.bbar = [{ xtype: 'statusbar', defaultText: me.defaultStatusText, name: 'searchStatusBar' }]; var onDelete = function() { var grid = me; Ext.MessageBox.confirm('Confirm delete', 'Are you sure?', function(btn) { if (btn == 'yes') { var view = me.getView(); me.reconfigure(me.store, me.columns); var selection = view.getSelectionModel().getSelection()[0]; var tc = Ext.create('casco.model.Rs', {id: selection.get('id')}); tc.erase(); if (selection) { me.store.remove(selection); selection.erase(); } me.getView().refresh(); } }); }; var onInsertRecord = function() { var tag = ''; me.store.each(function(record) { //取MaxTag+1 if 
(record.get('tag') > tag) tag = record.get('tag'); }, this); var re = /^\[(.*)?\]$/g; if (re.test(tag)) { var suffix = tag.toString().match(/[^\d]+/g); num = parseInt(tag.toString().match(/\d+/g)) + 1; tag = suffix[0] + num + suffix[1]; } else { tag = null; } var win = Ext.create('widget.rs.rsdetails', { status: 0, pointer: me, rs: Ext.create('casco.model.Rs'), columns: me.columns, version_id: me.curr_version.get('id'), tag_id: tag, project: me.project, document_id: me.document.id }); win.show(); }; me.listeners = { itemcontextmenu: function(view, record, item, index, e) { e.stopEvent(); var grid = me; if (!grid.rowCtxMenu) { grid.rowCtxMenu = Ext.create('Ext.menu.Menu', { items: [{ text: '插入记录', handler: onInsertRecord, }, { text: '删除记录', handler: onDelete }] }); }//if grid.selModel.select(record); grid.rowCtxMenu.showAt(e.getXY()); }, destroy: function(thisGrid) { if (thisGrid.rowCtxMenu) { thisGrid.rowCtxMenu.destroy(); } }, celldblclick: function(a, b, c, record) { localStorage.tag = record.get('tag'); console.log('列' + c); if (c == 0 || c == 1) { window.open('/draw/graph2.html#' + record.get('tag') + "&id=" + record.get('id')); return; } var win = Ext.create('widget.rs.rsdetails', { status: 1, rs: record, pointer: me, document_id: me.document_id, project: me.project, columns: me.columns, }); win.down('form').loadRecord(record); win.show(); } }; me.callParent(arguments); }, /* * Live Search Module Cofigures */ bufferedRenderer: false, //searchlive need searchValue: null, //search value initialization indexes: [], //The row indexes where matching strings are found. (used by previous and next buttons) searchRegExp: null, //The generated regular expression used for searching. caseSensitive: false, //Case sensitive mode. regExpMode: false, //Regular expression mode. tagsRe: /<[^>]*>/gm, //detects html tag gm 参数 tagsProtect: '\x0f', //DEL ASCII code matchCls: 'x-livesearch-match', //@cfg {String} matchCls The matched string css classe. 
defaultStatusText: '无匹配结果', afterRender: function() { var me = this; me.callParent(arguments); me.textField = me.down('textfield[name=searchField]'); me.statusBar = me.down('statusbar[name=searchStatusBar]'); }, focusTextField: function(view, td, cellIndex, record, tr, rowIndex, e, eOpts) { if (e.getKey() === e.S) { e.preventDefault(); this.textField.focus(); } }, getSearchValue: function() { var me = this, value = me.textField.getValue(); if (value === '') { return null; } if (!me.regExpMode) { value = Ext.String.escapeRegex(value); } else { try { new RegExp(value); } catch (error) { me.statusBar.setStatus({ text: error.message, iconCls: 'x-status-error' }); return null; } // this is stupid if (value === '^' || value === '$') { return null; } } return value; }, onTextFieldChange: function() { var me = this, count = 0, view = me.view, cellSelector = view.cellSelector, innerSelector = view.innerSelector; view.refresh(); // reset the statusbar me.statusBar.setStatus({ text: me.defaultStatusText, iconCls: '' }); me.searchValue = me.getSearchValue(); me.indexes = []; me.currentIndex = null; if (me.searchValue !== null) { me.searchRegExp = new RegExp(me.getSearchValue(), 'g' + (me.caseSensitive ? 
'' : 'i')); me.store.each(function(record, idx) { var td = Ext.fly(view.getNode(idx)).down(cellSelector), cell, matches, cellHTML; while (td) { cell = td.down(innerSelector); matches = cell.dom.innerHTML.match(me.tagsRe); cellHTML = cell.dom.innerHTML.replace(me.tagsRe, me.tagsProtect); // populate indexes array, set currentIndex, and replace wrap matched string in a span cellHTML = cellHTML.replace(me.searchRegExp, function(m) { count += 1; if (Ext.Array.indexOf(me.indexes, idx) === -1) { me.indexes.push(idx); } if (me.currentIndex === null) { me.currentIndex = idx; } return '<span class="' + me.matchCls + '">' + m + '</span>'; }); // restore protected tags Ext.each(matches, function(match) { cellHTML = cellHTML.replace(me.tagsProtect, match); }); // update cell html cell.dom.innerHTML = cellHTML; td = td.next(); } }, me); // results found if (me.currentIndex !== null) { // console.log(me.currentIndex); me.getSelectionModel().select(me.currentIndex); // Ext.fly(me.getView().getNode(me.currentIndex)).scrollInteView(); me.getView().focusRow(me.currentIndex); me.statusBar.setStatus({ text: count + ' 处匹配', iconCls: 'x-status-valid' }); } } // no results found if (me.currentIndex === null) {
me.textField.focus(); }, onPreviousClick: function() { var me = this, idx; if ((idx = Ext.Array.indexOf(me.indexes, me.currentIndex)) !== -1) { me.currentIndex = me.indexes[idx - 1] || me.indexes[me.indexes.length - 1]; me.getSelectionModel().select(me.currentIndex); me.getView().focusRow(me.currentIndex); } }, onNextClick: function() { var me = this, idx; if ((idx = Ext.Array.indexOf(me.indexes, me.currentIndex)) !== -1) { me.currentIndex = me.indexes[idx + 1] || me.indexes[0]; me.getSelectionModel().select(me.currentIndex); me.getView().focusRow(me.currentIndex); } }, caseSensitiveToggle: function(checkbox, checked) { this.caseSensitive = checked; this.onTextFieldChange(); } })
me.getSelectionModel().deselectAll(); }
random_line_split
experiment_util.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared utilities for experiments in the paper.""" import dataclasses import functools import sys import time import types from typing import Any, Callable, Tuple import jax import jax.numpy as jnp import matplotlib.pyplot as plt import numpy as np import optax from gumbel_max_causal_gadgets import coupling_util NDArray = PRNGKey = Any @dataclasses.dataclass(eq=False) class CouplingExperimentConfig: """Configuration and helper object for coupling experiments. Attributes: name: Name of the experiment model: Model definition (either gadget 1 or 2) logit_pair_distribution_fn: Function that produces random pairs of logits from the distribution D, given a key. coupling_loss_matrix_fn: Function that produces a matrix of penalties for each counterfactual coupling pair, given the two logit vectors. inner_num_samples: How many (relaxed) samples from the coupling to draw for each pair of logits. batch_size: How many pairs of logits to compute losses for at a time. use_transpose: Whether to pass a `transpose` argument to one of the sampled pairs. Should be True if and only if the model is Gadget 1. tx: optax optimizer definition to use. num_steps: How many training steps to use. print_every: Minimum frequency at which to print training progress. Always prints at powers of 2 regardless of this value. metadata: Arbitrary metadata to associate with this experiment. 
""" name: str model: Any logit_pair_distribution_fn: Callable[[PRNGKey], Tuple[NDArray, NDArray]] coupling_loss_matrix_fn: Callable[[NDArray, NDArray], NDArray] inner_num_samples: int batch_size: int use_transpose: bool tx: Any num_steps: int print_every: int = 100 metadata: Any = None def loss_and_metrics_one_pair(self, params, rng): """Samples a pair of logits, and computes loss and metrics.""" key_pq, key_samples = jax.random.split(rng) p_logits, q_logits = self.logit_pair_distribution_fn(key_pq) def sample_loss(key_sample): """Computes loss for a single sample of a relaxed pair of outcomes.""" q_kwargs = dict(transpose=True) if self.use_transpose else {} soft_p = self.model.apply( params, p_logits, key_sample, method=self.model.sample_relaxed) soft_q = self.model.apply( params, q_logits, key_sample, method=self.model.sample_relaxed, **q_kwargs) coupling_loss_matrix = self.coupling_loss_matrix_fn(p_logits, q_logits) coupling_loss = jnp.sum(soft_p[:, None] * soft_q[None, :] * coupling_loss_matrix) return coupling_loss loss_samples = jax.vmap(sample_loss)( jax.random.split(key_samples, self.inner_num_samples)) loss = jnp.mean(loss_samples) return loss, {"loss": loss} @functools.partial(jax.jit, static_argnums=0) def opt_step(self, opt_state, params, rng): """Performs one training step.""" def batch_loss(params, rng): stuff = jax.vmap(lambda rng: self.loss_and_metrics_one_pair(params, rng))( jax.random.split(rng, self.batch_size)) return jax.tree_map(jnp.mean, stuff) grads, metrics = jax.grad(batch_loss, has_aux=True)(params, rng) updates, new_opt_state = self.tx.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) any_was_nan = jax.tree_util.tree_reduce( jnp.logical_or, jax.tree_map(lambda v: jnp.any(jnp.isnan(v)), grads)) new_opt_state, new_params = jax.tree_map( lambda a, b: jnp.where(any_was_nan, a, b), (opt_state, params), (new_opt_state, new_params)) return new_opt_state, new_params, metrics, grads, any_was_nan def
(self, rng): """Training loop entry point. Calling this method runs the experiment described by this config, and returns various results collected during training. Args: rng: PRNGKey to use to initialize model and draw training examples. Returns: types.SimpleNamespace containing various results. Of importance: finished_reason: The reason we stopped training. params: The parameters we learned. """ # pylint: disable=possibly-unused-variable rng, init_key = jax.random.split(rng) params = self.model.init(init_key, jnp.zeros([self.model.S_dim])) opt_state = self.tx.init(params) start_time = time.time() count_since_reset = 0 all_metrics = [] try: i = 0 while i < self.num_steps: rng, key = jax.random.split(rng) # Pass the inputs in and take a gradient step. opt_state, params, metrics, grads, bad = self.opt_step( opt_state, params, key) all_metrics.append(jax.tree_map(np.array, metrics)) if bad: finished_reason = "nan" return types.SimpleNamespace(**locals()) count_since_reset += 1 if i % self.print_every == 0 or np.remainder(np.log2(i), 1) == 0: now = time.time() rate = count_since_reset / (now - start_time) start_time = now count_since_reset = 0 print(f"{i} [{rate}/s]:", jax.tree_map(float, metrics)) sys.stdout.flush() time.sleep(0.02) i += 1 except KeyboardInterrupt: finished_reason = "interrupt" return types.SimpleNamespace(**locals()) finished_reason = "done" (opt_state, params) = jax.tree_map(np.array, (opt_state, params)) return types.SimpleNamespace(**locals()) # pylint: enable=possibly-unused-variable def build_sampler(self, params): """Helper to build a joint sampler function for the model.""" def sampler(logits_1, logits_2, key): q_kwargs = dict(transpose=True) if self.use_transpose else {} x = self.model.bind(params).sample(logits_1, key) y = self.model.bind(params).sample(logits_2, key, **q_kwargs) return jnp.zeros([10, 10]).at[x, y].set(1.) 
return sampler def get_coupling_estimates(experiments, results, seed, logits_1=None, logits_2=None, num_joint_samples=10_000_000, logit_kwargs=None): """Computes couplings for a collection of experiments. All experiments should have the same logit_pair_distribution_fn. Args: experiments: List of experiments to evaluate. results: List of results, produced by calling `train` on each experiment. seed: Seed to use when estimating the coupling. logits_1: Optional logits. If not provided, uses logit_pair_distribution_fn. logits_2: Optional logits. If not provided, uses logit_pair_distribution_fn. num_joint_samples: How many samples to draw from the coupling when estimating it. logit_kwargs: Any keyword arguments that should be passed to the logit pair generator. Returns: (logits_1, logits_2), couplings where `couplings` is a dictionary whose keys include each of the experiments along with baselines, and the values are coupling matrices. """ logits_key, vis_key = jax.random.split(jax.random.PRNGKey(seed)) if logits_1 is None and logits_2 is None: logits_1, logits_2 = experiments[0].logit_pair_distribution_fn( logits_key, **(logit_kwargs or {})) logits_1 -= jax.scipy.special.logsumexp(logits_1) logits_2 -= jax.scipy.special.logsumexp(logits_2) probs_1 = jnp.exp(logits_1) probs_2 = jnp.exp(logits_2) independent_coupling = probs_1[:, None] * probs_2[None, :] gumbel_max_estimate = coupling_util.joint_from_samples( coupling_util.gumbel_max_sampler, logits_1, logits_2, vis_key, num_joint_samples, loop_size=500) icdf = coupling_util.inverse_cdf_coupling(logits_1, logits_2) icdf_perm = coupling_util.permuted_inverse_cdf_coupling(logits_1, logits_2) couplings = { "Independent": independent_coupling, "ICDF": icdf, "ICDF (permuted)": icdf_perm, "Gumbel-max": gumbel_max_estimate, } for experiment, result in zip(experiments, results): couplings[experiment.name] = coupling_util.joint_from_samples( experiment.build_sampler(result.params), logits_1, logits_2, vis_key, num_joint_samples, 
loop_size=500) return (logits_1, logits_2), couplings def compute_coupling_losses(experiments, logits_1, logits_2, estimated_couplings): """Estimate losses for each experiment. All experiments should have the same coupling_loss_matrix_fn. Args: experiments: List of experiments to evaluate. logits_1: First set of logits. logits_2: Second set of logits, estimated_couplings: A dictionary whose values are coupling matrices. Returns: A dictionary with the same keys as estimated_couplings whose values are estimates of the loss for this coupling. """ test_losses = {} for name, coupling in estimated_couplings.items(): loss_value = jnp.sum( coupling * experiments[0].coupling_loss_matrix_fn(logits_1, logits_2)) test_losses[name] = loss_value return test_losses def visualize_coupling_experiments(loss_values, logits_1, logits_2, couplings): """Visualizes the couplings using matplotlib. Args: loss_values: Loss values from `compute_coupling_losses` logits_1: First set of logits. logits_2: Second set of logits, couplings: A dictionary whose values are coupling matrices. """ ncols = 2 + len(couplings) _, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4 * ncols, 4)) axs[0].imshow(jnp.exp(logits_1)[:, None], vmin=0) axs[1].imshow(jnp.exp(logits_2)[None, :], vmin=0) for j, (name, coupling) in enumerate(couplings.items()): axs[j + 2].imshow(coupling, vmin=0) axs[j + 2].set_title(f"{name}:\n{loss_values[name]}") def compute_and_visualize_coupling_experiments(experiments, results, seed, **kwargs): """Helper function to both compute couplings and visualize them.""" (logits_1, logits_2), couplings = get_coupling_estimates(experiments, results, seed, **kwargs) test_losses = compute_coupling_losses(experiments, logits_1, logits_2, couplings) visualize_coupling_experiments(test_losses, logits_1, logits_2, couplings) def evaluate_joint(joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=None): """Evaluates a particular coupling sampler for a particular task. 
Args: joint_sampler: Function from (p_logits, q_logits, key) to an approximate coupling. experiment: Experiment that determines the logit pair distribution. seed: PRNGKey to use. num_pairs: How many pairs of logits to evaluate on. joint_correction_num_samples: Correction term for number of samples, used when estimating variance across samples for a single model. Not important for results in the paper. Returns: A summary string, along with average loss, standard error of average loss across logit pairs, and estimate of the variance of the loss viewed as a random variable over sampled counterfactual pairs. Note that only the average loss is used to compute the results in the paper; the others were used for early experiments. """ rng = jax.random.PRNGKey(seed) def run_pair(key): k1, k2 = jax.random.split(key, 2) p_logits, q_logits = experiment.logit_pair_distribution_fn(k1) joint_estimate = joint_sampler(p_logits, q_logits, k2) coupling_loss_matrix = experiment.coupling_loss_matrix_fn( p_logits, q_logits) loss_average = jnp.sum(joint_estimate * coupling_loss_matrix) loss_inner_variance = jnp.sum( joint_estimate * jnp.square(coupling_loss_matrix - loss_average)) if joint_correction_num_samples: n = joint_correction_num_samples loss_inner_variance = loss_inner_variance * n / (n - 1) return loss_average, loss_inner_variance pair_averages, pair_variances = jax.lax.map(run_pair, jax.random.split(rng, num_pairs)) overall_average = jnp.mean(pair_averages) overall_average_stderr = jnp.std(pair_averages) / jnp.sqrt(num_pairs) overall_pair_std = jnp.sqrt(jnp.mean(pair_variances)) # overall_pair_variance_stderr = jnp.std(pair_variances) / jnp.sqrt(num_pairs) summary = (f"average: {overall_average:.4f}, " f"inner st.dev.: +/- {overall_pair_std:.4}, " f"errorbars: +/- {overall_average_stderr:.4f}") return summary, overall_average, overall_average_stderr, overall_pair_std def evaluate_experiment(experiment, result, seed, num_pairs, samples_per_pair, loop_size=None): """Helper 
function to evaluate a single experiment.""" joint_sampler = functools.partial( coupling_util.joint_from_samples, experiment.build_sampler(result.params), num_samples=samples_per_pair, loop_size=loop_size) return { experiment.name: evaluate_joint( joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair) } def evaluate_baselines(experiment, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate the set of baselines.""" gumbel_max_joint_fn = functools.partial( coupling_util.joint_from_samples, coupling_util.gumbel_max_sampler, num_samples=samples_per_pair, loop_size=loop_size) return { "Independent": evaluate_joint( lambda p, q, _: coupling_util.independent_coupling(p, q), experiment, seed, num_pairs), "ICDF": evaluate_joint( lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "ICDF (permuted)": evaluate_joint( lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "Gumbel-max": evaluate_joint( gumbel_max_joint_fn, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair), } def evaluate_all(experiments, results, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate all experiments and baselines.""" eval_results = evaluate_baselines( experiments[0], seed, num_pairs, samples_per_pair, loop_size=None) for ex, res in zip(experiments, results): eval_results.update( evaluate_experiment(ex, res, seed, num_pairs, samples_per_pair, loop_size)) return eval_results
train
identifier_name
experiment_util.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared utilities for experiments in the paper.""" import dataclasses import functools import sys import time import types from typing import Any, Callable, Tuple import jax import jax.numpy as jnp import matplotlib.pyplot as plt import numpy as np import optax from gumbel_max_causal_gadgets import coupling_util NDArray = PRNGKey = Any @dataclasses.dataclass(eq=False) class CouplingExperimentConfig: """Configuration and helper object for coupling experiments. Attributes: name: Name of the experiment model: Model definition (either gadget 1 or 2) logit_pair_distribution_fn: Function that produces random pairs of logits from the distribution D, given a key. coupling_loss_matrix_fn: Function that produces a matrix of penalties for each counterfactual coupling pair, given the two logit vectors. inner_num_samples: How many (relaxed) samples from the coupling to draw for each pair of logits. batch_size: How many pairs of logits to compute losses for at a time. use_transpose: Whether to pass a `transpose` argument to one of the sampled pairs. Should be True if and only if the model is Gadget 1. tx: optax optimizer definition to use. num_steps: How many training steps to use. print_every: Minimum frequency at which to print training progress. Always prints at powers of 2 regardless of this value. metadata: Arbitrary metadata to associate with this experiment. 
""" name: str model: Any logit_pair_distribution_fn: Callable[[PRNGKey], Tuple[NDArray, NDArray]] coupling_loss_matrix_fn: Callable[[NDArray, NDArray], NDArray] inner_num_samples: int batch_size: int use_transpose: bool tx: Any num_steps: int print_every: int = 100 metadata: Any = None def loss_and_metrics_one_pair(self, params, rng): """Samples a pair of logits, and computes loss and metrics.""" key_pq, key_samples = jax.random.split(rng) p_logits, q_logits = self.logit_pair_distribution_fn(key_pq) def sample_loss(key_sample): """Computes loss for a single sample of a relaxed pair of outcomes.""" q_kwargs = dict(transpose=True) if self.use_transpose else {} soft_p = self.model.apply( params, p_logits, key_sample, method=self.model.sample_relaxed) soft_q = self.model.apply( params, q_logits, key_sample, method=self.model.sample_relaxed, **q_kwargs) coupling_loss_matrix = self.coupling_loss_matrix_fn(p_logits, q_logits) coupling_loss = jnp.sum(soft_p[:, None] * soft_q[None, :] * coupling_loss_matrix) return coupling_loss loss_samples = jax.vmap(sample_loss)( jax.random.split(key_samples, self.inner_num_samples)) loss = jnp.mean(loss_samples) return loss, {"loss": loss} @functools.partial(jax.jit, static_argnums=0) def opt_step(self, opt_state, params, rng): """Performs one training step.""" def batch_loss(params, rng): stuff = jax.vmap(lambda rng: self.loss_and_metrics_one_pair(params, rng))( jax.random.split(rng, self.batch_size)) return jax.tree_map(jnp.mean, stuff) grads, metrics = jax.grad(batch_loss, has_aux=True)(params, rng) updates, new_opt_state = self.tx.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) any_was_nan = jax.tree_util.tree_reduce( jnp.logical_or, jax.tree_map(lambda v: jnp.any(jnp.isnan(v)), grads)) new_opt_state, new_params = jax.tree_map( lambda a, b: jnp.where(any_was_nan, a, b), (opt_state, params), (new_opt_state, new_params)) return new_opt_state, new_params, metrics, grads, any_was_nan def train(self, 
rng): """Training loop entry point. Calling this method runs the experiment described by this config, and returns various results collected during training. Args: rng: PRNGKey to use to initialize model and draw training examples. Returns: types.SimpleNamespace containing various results. Of importance: finished_reason: The reason we stopped training. params: The parameters we learned. """ # pylint: disable=possibly-unused-variable rng, init_key = jax.random.split(rng) params = self.model.init(init_key, jnp.zeros([self.model.S_dim])) opt_state = self.tx.init(params) start_time = time.time() count_since_reset = 0 all_metrics = [] try: i = 0 while i < self.num_steps: rng, key = jax.random.split(rng) # Pass the inputs in and take a gradient step. opt_state, params, metrics, grads, bad = self.opt_step( opt_state, params, key) all_metrics.append(jax.tree_map(np.array, metrics)) if bad: finished_reason = "nan" return types.SimpleNamespace(**locals()) count_since_reset += 1 if i % self.print_every == 0 or np.remainder(np.log2(i), 1) == 0: now = time.time() rate = count_since_reset / (now - start_time) start_time = now count_since_reset = 0 print(f"{i} [{rate}/s]:", jax.tree_map(float, metrics)) sys.stdout.flush() time.sleep(0.02) i += 1 except KeyboardInterrupt: finished_reason = "interrupt" return types.SimpleNamespace(**locals()) finished_reason = "done" (opt_state, params) = jax.tree_map(np.array, (opt_state, params)) return types.SimpleNamespace(**locals()) # pylint: enable=possibly-unused-variable def build_sampler(self, params): """Helper to build a joint sampler function for the model.""" def sampler(logits_1, logits_2, key): q_kwargs = dict(transpose=True) if self.use_transpose else {} x = self.model.bind(params).sample(logits_1, key) y = self.model.bind(params).sample(logits_2, key, **q_kwargs) return jnp.zeros([10, 10]).at[x, y].set(1.) 
return sampler def get_coupling_estimates(experiments, results, seed, logits_1=None, logits_2=None, num_joint_samples=10_000_000, logit_kwargs=None): """Computes couplings for a collection of experiments. All experiments should have the same logit_pair_distribution_fn. Args: experiments: List of experiments to evaluate. results: List of results, produced by calling `train` on each experiment. seed: Seed to use when estimating the coupling. logits_1: Optional logits. If not provided, uses logit_pair_distribution_fn. logits_2: Optional logits. If not provided, uses logit_pair_distribution_fn. num_joint_samples: How many samples to draw from the coupling when estimating it. logit_kwargs: Any keyword arguments that should be passed to the logit pair generator. Returns: (logits_1, logits_2), couplings where `couplings` is a dictionary whose keys include each of the experiments along with baselines, and the values are coupling matrices. """ logits_key, vis_key = jax.random.split(jax.random.PRNGKey(seed)) if logits_1 is None and logits_2 is None: logits_1, logits_2 = experiments[0].logit_pair_distribution_fn( logits_key, **(logit_kwargs or {})) logits_1 -= jax.scipy.special.logsumexp(logits_1) logits_2 -= jax.scipy.special.logsumexp(logits_2) probs_1 = jnp.exp(logits_1) probs_2 = jnp.exp(logits_2) independent_coupling = probs_1[:, None] * probs_2[None, :] gumbel_max_estimate = coupling_util.joint_from_samples( coupling_util.gumbel_max_sampler, logits_1, logits_2, vis_key, num_joint_samples, loop_size=500) icdf = coupling_util.inverse_cdf_coupling(logits_1, logits_2) icdf_perm = coupling_util.permuted_inverse_cdf_coupling(logits_1, logits_2) couplings = { "Independent": independent_coupling, "ICDF": icdf, "ICDF (permuted)": icdf_perm, "Gumbel-max": gumbel_max_estimate, } for experiment, result in zip(experiments, results): couplings[experiment.name] = coupling_util.joint_from_samples( experiment.build_sampler(result.params), logits_1, logits_2, vis_key, num_joint_samples, 
loop_size=500) return (logits_1, logits_2), couplings def compute_coupling_losses(experiments, logits_1, logits_2, estimated_couplings): """Estimate losses for each experiment. All experiments should have the same coupling_loss_matrix_fn. Args: experiments: List of experiments to evaluate. logits_1: First set of logits. logits_2: Second set of logits, estimated_couplings: A dictionary whose values are coupling matrices. Returns: A dictionary with the same keys as estimated_couplings whose values are estimates of the loss for this coupling. """ test_losses = {} for name, coupling in estimated_couplings.items(): loss_value = jnp.sum( coupling * experiments[0].coupling_loss_matrix_fn(logits_1, logits_2)) test_losses[name] = loss_value return test_losses def visualize_coupling_experiments(loss_values, logits_1, logits_2, couplings): """Visualizes the couplings using matplotlib. Args: loss_values: Loss values from `compute_coupling_losses` logits_1: First set of logits. logits_2: Second set of logits, couplings: A dictionary whose values are coupling matrices. """ ncols = 2 + len(couplings) _, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4 * ncols, 4)) axs[0].imshow(jnp.exp(logits_1)[:, None], vmin=0) axs[1].imshow(jnp.exp(logits_2)[None, :], vmin=0) for j, (name, coupling) in enumerate(couplings.items()): axs[j + 2].imshow(coupling, vmin=0) axs[j + 2].set_title(f"{name}:\n{loss_values[name]}") def compute_and_visualize_coupling_experiments(experiments, results, seed, **kwargs): """Helper function to both compute couplings and visualize them.""" (logits_1, logits_2), couplings = get_coupling_estimates(experiments, results, seed, **kwargs) test_losses = compute_coupling_losses(experiments, logits_1, logits_2, couplings) visualize_coupling_experiments(test_losses, logits_1, logits_2, couplings) def evaluate_joint(joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=None): """Evaluates a particular coupling sampler for a particular task. 
Args: joint_sampler: Function from (p_logits, q_logits, key) to an approximate coupling. experiment: Experiment that determines the logit pair distribution. seed: PRNGKey to use. num_pairs: How many pairs of logits to evaluate on. joint_correction_num_samples: Correction term for number of samples, used when estimating variance across samples for a single model. Not important for results in the paper. Returns: A summary string, along with average loss, standard error of average loss across logit pairs, and estimate of the variance of the loss viewed as a random variable over sampled counterfactual pairs. Note that only the average loss is used to compute the results in the paper; the others were used for early experiments. """ rng = jax.random.PRNGKey(seed) def run_pair(key): k1, k2 = jax.random.split(key, 2) p_logits, q_logits = experiment.logit_pair_distribution_fn(k1) joint_estimate = joint_sampler(p_logits, q_logits, k2) coupling_loss_matrix = experiment.coupling_loss_matrix_fn( p_logits, q_logits) loss_average = jnp.sum(joint_estimate * coupling_loss_matrix) loss_inner_variance = jnp.sum( joint_estimate * jnp.square(coupling_loss_matrix - loss_average)) if joint_correction_num_samples: n = joint_correction_num_samples loss_inner_variance = loss_inner_variance * n / (n - 1) return loss_average, loss_inner_variance pair_averages, pair_variances = jax.lax.map(run_pair, jax.random.split(rng, num_pairs)) overall_average = jnp.mean(pair_averages) overall_average_stderr = jnp.std(pair_averages) / jnp.sqrt(num_pairs) overall_pair_std = jnp.sqrt(jnp.mean(pair_variances)) # overall_pair_variance_stderr = jnp.std(pair_variances) / jnp.sqrt(num_pairs) summary = (f"average: {overall_average:.4f}, " f"inner st.dev.: +/- {overall_pair_std:.4}, " f"errorbars: +/- {overall_average_stderr:.4f}") return summary, overall_average, overall_average_stderr, overall_pair_std def evaluate_experiment(experiment, result, seed, num_pairs, samples_per_pair, loop_size=None): """Helper 
function to evaluate a single experiment.""" joint_sampler = functools.partial( coupling_util.joint_from_samples, experiment.build_sampler(result.params), num_samples=samples_per_pair, loop_size=loop_size) return { experiment.name: evaluate_joint( joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair) } def evaluate_baselines(experiment, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate the set of baselines.""" gumbel_max_joint_fn = functools.partial( coupling_util.joint_from_samples, coupling_util.gumbel_max_sampler, num_samples=samples_per_pair, loop_size=loop_size) return { "Independent": evaluate_joint( lambda p, q, _: coupling_util.independent_coupling(p, q), experiment, seed, num_pairs), "ICDF": evaluate_joint( lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "ICDF (permuted)": evaluate_joint( lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "Gumbel-max": evaluate_joint( gumbel_max_joint_fn, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair), } def evaluate_all(experiments, results, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate all experiments and baselines.""" eval_results = evaluate_baselines( experiments[0], seed, num_pairs, samples_per_pair, loop_size=None) for ex, res in zip(experiments, results):
return eval_results
eval_results.update( evaluate_experiment(ex, res, seed, num_pairs, samples_per_pair, loop_size))
conditional_block
experiment_util.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared utilities for experiments in the paper.""" import dataclasses import functools import sys import time import types from typing import Any, Callable, Tuple import jax import jax.numpy as jnp import matplotlib.pyplot as plt import numpy as np import optax from gumbel_max_causal_gadgets import coupling_util NDArray = PRNGKey = Any @dataclasses.dataclass(eq=False) class CouplingExperimentConfig: """Configuration and helper object for coupling experiments. Attributes: name: Name of the experiment model: Model definition (either gadget 1 or 2) logit_pair_distribution_fn: Function that produces random pairs of logits from the distribution D, given a key. coupling_loss_matrix_fn: Function that produces a matrix of penalties for each counterfactual coupling pair, given the two logit vectors. inner_num_samples: How many (relaxed) samples from the coupling to draw for each pair of logits. batch_size: How many pairs of logits to compute losses for at a time. use_transpose: Whether to pass a `transpose` argument to one of the sampled pairs. Should be True if and only if the model is Gadget 1. tx: optax optimizer definition to use. num_steps: How many training steps to use. print_every: Minimum frequency at which to print training progress. Always prints at powers of 2 regardless of this value. metadata: Arbitrary metadata to associate with this experiment. 
""" name: str model: Any logit_pair_distribution_fn: Callable[[PRNGKey], Tuple[NDArray, NDArray]] coupling_loss_matrix_fn: Callable[[NDArray, NDArray], NDArray] inner_num_samples: int batch_size: int use_transpose: bool tx: Any num_steps: int print_every: int = 100 metadata: Any = None def loss_and_metrics_one_pair(self, params, rng): """Samples a pair of logits, and computes loss and metrics.""" key_pq, key_samples = jax.random.split(rng) p_logits, q_logits = self.logit_pair_distribution_fn(key_pq) def sample_loss(key_sample): """Computes loss for a single sample of a relaxed pair of outcomes.""" q_kwargs = dict(transpose=True) if self.use_transpose else {} soft_p = self.model.apply( params, p_logits, key_sample, method=self.model.sample_relaxed) soft_q = self.model.apply( params, q_logits, key_sample, method=self.model.sample_relaxed, **q_kwargs) coupling_loss_matrix = self.coupling_loss_matrix_fn(p_logits, q_logits) coupling_loss = jnp.sum(soft_p[:, None] * soft_q[None, :] * coupling_loss_matrix) return coupling_loss loss_samples = jax.vmap(sample_loss)( jax.random.split(key_samples, self.inner_num_samples)) loss = jnp.mean(loss_samples) return loss, {"loss": loss} @functools.partial(jax.jit, static_argnums=0) def opt_step(self, opt_state, params, rng): """Performs one training step.""" def batch_loss(params, rng): stuff = jax.vmap(lambda rng: self.loss_and_metrics_one_pair(params, rng))( jax.random.split(rng, self.batch_size)) return jax.tree_map(jnp.mean, stuff) grads, metrics = jax.grad(batch_loss, has_aux=True)(params, rng) updates, new_opt_state = self.tx.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) any_was_nan = jax.tree_util.tree_reduce( jnp.logical_or, jax.tree_map(lambda v: jnp.any(jnp.isnan(v)), grads)) new_opt_state, new_params = jax.tree_map( lambda a, b: jnp.where(any_was_nan, a, b), (opt_state, params), (new_opt_state, new_params)) return new_opt_state, new_params, metrics, grads, any_was_nan def train(self, 
rng): """Training loop entry point. Calling this method runs the experiment described by this config, and returns various results collected during training. Args: rng: PRNGKey to use to initialize model and draw training examples. Returns: types.SimpleNamespace containing various results. Of importance: finished_reason: The reason we stopped training. params: The parameters we learned. """ # pylint: disable=possibly-unused-variable rng, init_key = jax.random.split(rng) params = self.model.init(init_key, jnp.zeros([self.model.S_dim])) opt_state = self.tx.init(params) start_time = time.time() count_since_reset = 0 all_metrics = [] try: i = 0 while i < self.num_steps: rng, key = jax.random.split(rng) # Pass the inputs in and take a gradient step. opt_state, params, metrics, grads, bad = self.opt_step( opt_state, params, key) all_metrics.append(jax.tree_map(np.array, metrics)) if bad: finished_reason = "nan" return types.SimpleNamespace(**locals()) count_since_reset += 1 if i % self.print_every == 0 or np.remainder(np.log2(i), 1) == 0: now = time.time() rate = count_since_reset / (now - start_time) start_time = now count_since_reset = 0 print(f"{i} [{rate}/s]:", jax.tree_map(float, metrics)) sys.stdout.flush() time.sleep(0.02) i += 1 except KeyboardInterrupt: finished_reason = "interrupt" return types.SimpleNamespace(**locals()) finished_reason = "done" (opt_state, params) = jax.tree_map(np.array, (opt_state, params)) return types.SimpleNamespace(**locals()) # pylint: enable=possibly-unused-variable def build_sampler(self, params): """Helper to build a joint sampler function for the model.""" def sampler(logits_1, logits_2, key):
return sampler def get_coupling_estimates(experiments, results, seed, logits_1=None, logits_2=None, num_joint_samples=10_000_000, logit_kwargs=None): """Computes couplings for a collection of experiments. All experiments should have the same logit_pair_distribution_fn. Args: experiments: List of experiments to evaluate. results: List of results, produced by calling `train` on each experiment. seed: Seed to use when estimating the coupling. logits_1: Optional logits. If not provided, uses logit_pair_distribution_fn. logits_2: Optional logits. If not provided, uses logit_pair_distribution_fn. num_joint_samples: How many samples to draw from the coupling when estimating it. logit_kwargs: Any keyword arguments that should be passed to the logit pair generator. Returns: (logits_1, logits_2), couplings where `couplings` is a dictionary whose keys include each of the experiments along with baselines, and the values are coupling matrices. """ logits_key, vis_key = jax.random.split(jax.random.PRNGKey(seed)) if logits_1 is None and logits_2 is None: logits_1, logits_2 = experiments[0].logit_pair_distribution_fn( logits_key, **(logit_kwargs or {})) logits_1 -= jax.scipy.special.logsumexp(logits_1) logits_2 -= jax.scipy.special.logsumexp(logits_2) probs_1 = jnp.exp(logits_1) probs_2 = jnp.exp(logits_2) independent_coupling = probs_1[:, None] * probs_2[None, :] gumbel_max_estimate = coupling_util.joint_from_samples( coupling_util.gumbel_max_sampler, logits_1, logits_2, vis_key, num_joint_samples, loop_size=500) icdf = coupling_util.inverse_cdf_coupling(logits_1, logits_2) icdf_perm = coupling_util.permuted_inverse_cdf_coupling(logits_1, logits_2) couplings = { "Independent": independent_coupling, "ICDF": icdf, "ICDF (permuted)": icdf_perm, "Gumbel-max": gumbel_max_estimate, } for experiment, result in zip(experiments, results): couplings[experiment.name] = coupling_util.joint_from_samples( experiment.build_sampler(result.params), logits_1, logits_2, vis_key, num_joint_samples, 
loop_size=500) return (logits_1, logits_2), couplings def compute_coupling_losses(experiments, logits_1, logits_2, estimated_couplings): """Estimate losses for each experiment. All experiments should have the same coupling_loss_matrix_fn. Args: experiments: List of experiments to evaluate. logits_1: First set of logits. logits_2: Second set of logits, estimated_couplings: A dictionary whose values are coupling matrices. Returns: A dictionary with the same keys as estimated_couplings whose values are estimates of the loss for this coupling. """ test_losses = {} for name, coupling in estimated_couplings.items(): loss_value = jnp.sum( coupling * experiments[0].coupling_loss_matrix_fn(logits_1, logits_2)) test_losses[name] = loss_value return test_losses def visualize_coupling_experiments(loss_values, logits_1, logits_2, couplings): """Visualizes the couplings using matplotlib. Args: loss_values: Loss values from `compute_coupling_losses` logits_1: First set of logits. logits_2: Second set of logits, couplings: A dictionary whose values are coupling matrices. """ ncols = 2 + len(couplings) _, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4 * ncols, 4)) axs[0].imshow(jnp.exp(logits_1)[:, None], vmin=0) axs[1].imshow(jnp.exp(logits_2)[None, :], vmin=0) for j, (name, coupling) in enumerate(couplings.items()): axs[j + 2].imshow(coupling, vmin=0) axs[j + 2].set_title(f"{name}:\n{loss_values[name]}") def compute_and_visualize_coupling_experiments(experiments, results, seed, **kwargs): """Helper function to both compute couplings and visualize them.""" (logits_1, logits_2), couplings = get_coupling_estimates(experiments, results, seed, **kwargs) test_losses = compute_coupling_losses(experiments, logits_1, logits_2, couplings) visualize_coupling_experiments(test_losses, logits_1, logits_2, couplings) def evaluate_joint(joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=None): """Evaluates a particular coupling sampler for a particular task. 
Args: joint_sampler: Function from (p_logits, q_logits, key) to an approximate coupling. experiment: Experiment that determines the logit pair distribution. seed: PRNGKey to use. num_pairs: How many pairs of logits to evaluate on. joint_correction_num_samples: Correction term for number of samples, used when estimating variance across samples for a single model. Not important for results in the paper. Returns: A summary string, along with average loss, standard error of average loss across logit pairs, and estimate of the variance of the loss viewed as a random variable over sampled counterfactual pairs. Note that only the average loss is used to compute the results in the paper; the others were used for early experiments. """ rng = jax.random.PRNGKey(seed) def run_pair(key): k1, k2 = jax.random.split(key, 2) p_logits, q_logits = experiment.logit_pair_distribution_fn(k1) joint_estimate = joint_sampler(p_logits, q_logits, k2) coupling_loss_matrix = experiment.coupling_loss_matrix_fn( p_logits, q_logits) loss_average = jnp.sum(joint_estimate * coupling_loss_matrix) loss_inner_variance = jnp.sum( joint_estimate * jnp.square(coupling_loss_matrix - loss_average)) if joint_correction_num_samples: n = joint_correction_num_samples loss_inner_variance = loss_inner_variance * n / (n - 1) return loss_average, loss_inner_variance pair_averages, pair_variances = jax.lax.map(run_pair, jax.random.split(rng, num_pairs)) overall_average = jnp.mean(pair_averages) overall_average_stderr = jnp.std(pair_averages) / jnp.sqrt(num_pairs) overall_pair_std = jnp.sqrt(jnp.mean(pair_variances)) # overall_pair_variance_stderr = jnp.std(pair_variances) / jnp.sqrt(num_pairs) summary = (f"average: {overall_average:.4f}, " f"inner st.dev.: +/- {overall_pair_std:.4}, " f"errorbars: +/- {overall_average_stderr:.4f}") return summary, overall_average, overall_average_stderr, overall_pair_std def evaluate_experiment(experiment, result, seed, num_pairs, samples_per_pair, loop_size=None): """Helper 
function to evaluate a single experiment.""" joint_sampler = functools.partial( coupling_util.joint_from_samples, experiment.build_sampler(result.params), num_samples=samples_per_pair, loop_size=loop_size) return { experiment.name: evaluate_joint( joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair) } def evaluate_baselines(experiment, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate the set of baselines.""" gumbel_max_joint_fn = functools.partial( coupling_util.joint_from_samples, coupling_util.gumbel_max_sampler, num_samples=samples_per_pair, loop_size=loop_size) return { "Independent": evaluate_joint( lambda p, q, _: coupling_util.independent_coupling(p, q), experiment, seed, num_pairs), "ICDF": evaluate_joint( lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "ICDF (permuted)": evaluate_joint( lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "Gumbel-max": evaluate_joint( gumbel_max_joint_fn, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair), } def evaluate_all(experiments, results, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate all experiments and baselines.""" eval_results = evaluate_baselines( experiments[0], seed, num_pairs, samples_per_pair, loop_size=None) for ex, res in zip(experiments, results): eval_results.update( evaluate_experiment(ex, res, seed, num_pairs, samples_per_pair, loop_size)) return eval_results
q_kwargs = dict(transpose=True) if self.use_transpose else {} x = self.model.bind(params).sample(logits_1, key) y = self.model.bind(params).sample(logits_2, key, **q_kwargs) return jnp.zeros([10, 10]).at[x, y].set(1.)
identifier_body
experiment_util.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared utilities for experiments in the paper.""" import dataclasses import functools import sys import time import types from typing import Any, Callable, Tuple import jax import jax.numpy as jnp import matplotlib.pyplot as plt import numpy as np import optax from gumbel_max_causal_gadgets import coupling_util NDArray = PRNGKey = Any @dataclasses.dataclass(eq=False) class CouplingExperimentConfig: """Configuration and helper object for coupling experiments. Attributes: name: Name of the experiment model: Model definition (either gadget 1 or 2) logit_pair_distribution_fn: Function that produces random pairs of logits from the distribution D, given a key. coupling_loss_matrix_fn: Function that produces a matrix of penalties for each counterfactual coupling pair, given the two logit vectors. inner_num_samples: How many (relaxed) samples from the coupling to draw for each pair of logits. batch_size: How many pairs of logits to compute losses for at a time. use_transpose: Whether to pass a `transpose` argument to one of the sampled pairs. Should be True if and only if the model is Gadget 1. tx: optax optimizer definition to use. num_steps: How many training steps to use. print_every: Minimum frequency at which to print training progress. Always prints at powers of 2 regardless of this value. metadata: Arbitrary metadata to associate with this experiment. 
""" name: str model: Any logit_pair_distribution_fn: Callable[[PRNGKey], Tuple[NDArray, NDArray]] coupling_loss_matrix_fn: Callable[[NDArray, NDArray], NDArray] inner_num_samples: int batch_size: int use_transpose: bool tx: Any num_steps: int print_every: int = 100 metadata: Any = None def loss_and_metrics_one_pair(self, params, rng): """Samples a pair of logits, and computes loss and metrics.""" key_pq, key_samples = jax.random.split(rng) p_logits, q_logits = self.logit_pair_distribution_fn(key_pq) def sample_loss(key_sample): """Computes loss for a single sample of a relaxed pair of outcomes.""" q_kwargs = dict(transpose=True) if self.use_transpose else {} soft_p = self.model.apply( params, p_logits, key_sample, method=self.model.sample_relaxed) soft_q = self.model.apply( params, q_logits, key_sample, method=self.model.sample_relaxed, **q_kwargs) coupling_loss_matrix = self.coupling_loss_matrix_fn(p_logits, q_logits) coupling_loss = jnp.sum(soft_p[:, None] * soft_q[None, :] * coupling_loss_matrix) return coupling_loss loss_samples = jax.vmap(sample_loss)( jax.random.split(key_samples, self.inner_num_samples)) loss = jnp.mean(loss_samples) return loss, {"loss": loss} @functools.partial(jax.jit, static_argnums=0) def opt_step(self, opt_state, params, rng): """Performs one training step.""" def batch_loss(params, rng): stuff = jax.vmap(lambda rng: self.loss_and_metrics_one_pair(params, rng))( jax.random.split(rng, self.batch_size)) return jax.tree_map(jnp.mean, stuff) grads, metrics = jax.grad(batch_loss, has_aux=True)(params, rng) updates, new_opt_state = self.tx.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) any_was_nan = jax.tree_util.tree_reduce( jnp.logical_or, jax.tree_map(lambda v: jnp.any(jnp.isnan(v)), grads)) new_opt_state, new_params = jax.tree_map( lambda a, b: jnp.where(any_was_nan, a, b), (opt_state, params), (new_opt_state, new_params)) return new_opt_state, new_params, metrics, grads, any_was_nan def train(self, 
rng): """Training loop entry point. Calling this method runs the experiment described by this config, and returns various results collected during training. Args: rng: PRNGKey to use to initialize model and draw training examples. Returns: types.SimpleNamespace containing various results. Of importance: finished_reason: The reason we stopped training. params: The parameters we learned. """ # pylint: disable=possibly-unused-variable rng, init_key = jax.random.split(rng) params = self.model.init(init_key, jnp.zeros([self.model.S_dim])) opt_state = self.tx.init(params) start_time = time.time() count_since_reset = 0 all_metrics = [] try: i = 0 while i < self.num_steps: rng, key = jax.random.split(rng) # Pass the inputs in and take a gradient step. opt_state, params, metrics, grads, bad = self.opt_step( opt_state, params, key) all_metrics.append(jax.tree_map(np.array, metrics)) if bad: finished_reason = "nan" return types.SimpleNamespace(**locals()) count_since_reset += 1 if i % self.print_every == 0 or np.remainder(np.log2(i), 1) == 0: now = time.time() rate = count_since_reset / (now - start_time) start_time = now count_since_reset = 0 print(f"{i} [{rate}/s]:", jax.tree_map(float, metrics)) sys.stdout.flush() time.sleep(0.02) i += 1 except KeyboardInterrupt: finished_reason = "interrupt" return types.SimpleNamespace(**locals()) finished_reason = "done" (opt_state, params) = jax.tree_map(np.array, (opt_state, params)) return types.SimpleNamespace(**locals()) # pylint: enable=possibly-unused-variable def build_sampler(self, params): """Helper to build a joint sampler function for the model.""" def sampler(logits_1, logits_2, key): q_kwargs = dict(transpose=True) if self.use_transpose else {} x = self.model.bind(params).sample(logits_1, key) y = self.model.bind(params).sample(logits_2, key, **q_kwargs) return jnp.zeros([10, 10]).at[x, y].set(1.) 
return sampler def get_coupling_estimates(experiments, results, seed, logits_1=None, logits_2=None, num_joint_samples=10_000_000, logit_kwargs=None): """Computes couplings for a collection of experiments. All experiments should have the same logit_pair_distribution_fn. Args: experiments: List of experiments to evaluate. results: List of results, produced by calling `train` on each experiment. seed: Seed to use when estimating the coupling. logits_1: Optional logits. If not provided, uses logit_pair_distribution_fn. logits_2: Optional logits. If not provided, uses logit_pair_distribution_fn. num_joint_samples: How many samples to draw from the coupling when estimating it. logit_kwargs: Any keyword arguments that should be passed to the logit pair generator. Returns: (logits_1, logits_2), couplings where `couplings` is a dictionary whose keys include each of the experiments along with baselines, and the values are coupling matrices. """ logits_key, vis_key = jax.random.split(jax.random.PRNGKey(seed)) if logits_1 is None and logits_2 is None: logits_1, logits_2 = experiments[0].logit_pair_distribution_fn( logits_key, **(logit_kwargs or {})) logits_1 -= jax.scipy.special.logsumexp(logits_1) logits_2 -= jax.scipy.special.logsumexp(logits_2) probs_1 = jnp.exp(logits_1) probs_2 = jnp.exp(logits_2) independent_coupling = probs_1[:, None] * probs_2[None, :] gumbel_max_estimate = coupling_util.joint_from_samples( coupling_util.gumbel_max_sampler, logits_1, logits_2, vis_key, num_joint_samples, loop_size=500) icdf = coupling_util.inverse_cdf_coupling(logits_1, logits_2) icdf_perm = coupling_util.permuted_inverse_cdf_coupling(logits_1, logits_2) couplings = { "Independent": independent_coupling, "ICDF": icdf, "ICDF (permuted)": icdf_perm, "Gumbel-max": gumbel_max_estimate, } for experiment, result in zip(experiments, results): couplings[experiment.name] = coupling_util.joint_from_samples( experiment.build_sampler(result.params), logits_1, logits_2, vis_key, num_joint_samples, 
loop_size=500) return (logits_1, logits_2), couplings def compute_coupling_losses(experiments, logits_1, logits_2, estimated_couplings): """Estimate losses for each experiment. All experiments should have the same coupling_loss_matrix_fn. Args: experiments: List of experiments to evaluate. logits_1: First set of logits. logits_2: Second set of logits, estimated_couplings: A dictionary whose values are coupling matrices. Returns: A dictionary with the same keys as estimated_couplings whose values are estimates of the loss for this coupling. """ test_losses = {} for name, coupling in estimated_couplings.items(): loss_value = jnp.sum( coupling * experiments[0].coupling_loss_matrix_fn(logits_1, logits_2)) test_losses[name] = loss_value return test_losses def visualize_coupling_experiments(loss_values, logits_1, logits_2, couplings): """Visualizes the couplings using matplotlib. Args: loss_values: Loss values from `compute_coupling_losses` logits_1: First set of logits. logits_2: Second set of logits, couplings: A dictionary whose values are coupling matrices. """ ncols = 2 + len(couplings)
axs[j + 2].set_title(f"{name}:\n{loss_values[name]}") def compute_and_visualize_coupling_experiments(experiments, results, seed, **kwargs): """Helper function to both compute couplings and visualize them.""" (logits_1, logits_2), couplings = get_coupling_estimates(experiments, results, seed, **kwargs) test_losses = compute_coupling_losses(experiments, logits_1, logits_2, couplings) visualize_coupling_experiments(test_losses, logits_1, logits_2, couplings) def evaluate_joint(joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=None): """Evaluates a particular coupling sampler for a particular task. Args: joint_sampler: Function from (p_logits, q_logits, key) to an approximate coupling. experiment: Experiment that determines the logit pair distribution. seed: PRNGKey to use. num_pairs: How many pairs of logits to evaluate on. joint_correction_num_samples: Correction term for number of samples, used when estimating variance across samples for a single model. Not important for results in the paper. Returns: A summary string, along with average loss, standard error of average loss across logit pairs, and estimate of the variance of the loss viewed as a random variable over sampled counterfactual pairs. Note that only the average loss is used to compute the results in the paper; the others were used for early experiments. 
""" rng = jax.random.PRNGKey(seed) def run_pair(key): k1, k2 = jax.random.split(key, 2) p_logits, q_logits = experiment.logit_pair_distribution_fn(k1) joint_estimate = joint_sampler(p_logits, q_logits, k2) coupling_loss_matrix = experiment.coupling_loss_matrix_fn( p_logits, q_logits) loss_average = jnp.sum(joint_estimate * coupling_loss_matrix) loss_inner_variance = jnp.sum( joint_estimate * jnp.square(coupling_loss_matrix - loss_average)) if joint_correction_num_samples: n = joint_correction_num_samples loss_inner_variance = loss_inner_variance * n / (n - 1) return loss_average, loss_inner_variance pair_averages, pair_variances = jax.lax.map(run_pair, jax.random.split(rng, num_pairs)) overall_average = jnp.mean(pair_averages) overall_average_stderr = jnp.std(pair_averages) / jnp.sqrt(num_pairs) overall_pair_std = jnp.sqrt(jnp.mean(pair_variances)) # overall_pair_variance_stderr = jnp.std(pair_variances) / jnp.sqrt(num_pairs) summary = (f"average: {overall_average:.4f}, " f"inner st.dev.: +/- {overall_pair_std:.4}, " f"errorbars: +/- {overall_average_stderr:.4f}") return summary, overall_average, overall_average_stderr, overall_pair_std def evaluate_experiment(experiment, result, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate a single experiment.""" joint_sampler = functools.partial( coupling_util.joint_from_samples, experiment.build_sampler(result.params), num_samples=samples_per_pair, loop_size=loop_size) return { experiment.name: evaluate_joint( joint_sampler, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair) } def evaluate_baselines(experiment, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate the set of baselines.""" gumbel_max_joint_fn = functools.partial( coupling_util.joint_from_samples, coupling_util.gumbel_max_sampler, num_samples=samples_per_pair, loop_size=loop_size) return { "Independent": evaluate_joint( lambda p, q, _: coupling_util.independent_coupling(p, 
q), experiment, seed, num_pairs), "ICDF": evaluate_joint( lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "ICDF (permuted)": evaluate_joint( lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q), experiment, seed, num_pairs), "Gumbel-max": evaluate_joint( gumbel_max_joint_fn, experiment, seed, num_pairs, joint_correction_num_samples=samples_per_pair), } def evaluate_all(experiments, results, seed, num_pairs, samples_per_pair, loop_size=None): """Helper function to evaluate all experiments and baselines.""" eval_results = evaluate_baselines( experiments[0], seed, num_pairs, samples_per_pair, loop_size=None) for ex, res in zip(experiments, results): eval_results.update( evaluate_experiment(ex, res, seed, num_pairs, samples_per_pair, loop_size)) return eval_results
_, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4 * ncols, 4)) axs[0].imshow(jnp.exp(logits_1)[:, None], vmin=0) axs[1].imshow(jnp.exp(logits_2)[None, :], vmin=0) for j, (name, coupling) in enumerate(couplings.items()): axs[j + 2].imshow(coupling, vmin=0)
random_line_split
gesture_recognition_3.4.py
# -*- encoding: utf-8 -*- ''' @File : gesture_recognition_3.4.py @Time : 2021/07/18 19:09:12 @Author : Yu Xiao 于潇 @Version : 1.0 @Contact : superyuxiao@icloud.com @License : (C)Copyright 2020-2021, Key Laboratory of University Wireless Communication Beijing University of Posts and Telecommunications @Desc : None ''' # ------------------------------ file details ------------------------------ # # 四个人,一个位置,巴特沃斯低通,PCA,九个天线对,81*9输入CNN # 使用pytorch重构 # 创建自己的数据集,但是速度特别特别特别慢 # 四个人,一个位置,巴特沃斯低通,30路子载波,一个天线对,81*30输入CNN。修改了网络,添加了一个全连接层。 # (模型不收敛可能是全连接层的输入输出分配不好,也可能是学习率的问题,目前0.001) # 按不同人划分训练集和测试集 # ------------------------------ file details ------------------------------ # # 加载相关库 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms import numpy as np from sklearn.utils import shuffle import datetime from preprocessing import mul_subcarries # 定义数据集读取器 def load_data(filepath=None): # ! 读取数据文件 # * 读取数据 feature_number = 81 * 30 # ! DX # 手势O,位置1 filepath_O_1 = filepath + 'DX/O/gresture_O_location_1_' csi_DX_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'DX/X/gresture_X_location_1_' csi_DX_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'DX/PO/gresture_PO_location_1_' csi_DX_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_DX_1 = np.array((csi_DX_O_1, csi_DX_X_1, csi_DX_PO_1)) csi_DX_1 = np.reshape(csi_DX_1, (-1, feature_number + 1)) # ! 注意修改 print(datetime.datetime.now()) # ! 
LJP # 手势O,位置1 filepath_O_1 = filepath + 'LJP/O/gresture_O_location_1_' csi_LJP_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LJP/X/gresture_X_location_1_' csi_LJP_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LJP/PO/gresture_PO_location_1_' csi_LJP_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LJP_1 = np.array((csi_LJP_O_1, csi_LJP_X_1, csi_LJP_PO_1)) csi_LJP_1 = np.reshape(csi_LJP_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! LZW # 手势O,位置1 filepath_O_1 = filepath + 'LZW/O/gresture_O_location_1_' csi_LZW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LZW/X/gresture_X_location_1_' csi_LZW_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LZW/PO/gresture_PO_location_1_' csi_LZW_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LZW_1 = np.array((csi_LZW_O_1, csi_LZW_X_1, csi_LZW_PO_1)) csi_LZW_1 = np.reshape(csi_LZW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! MYW # 手势O,位置1 # ? 只有手势O filepath_O_1 = filepath + 'MYW/O/gresture_O_location_1_' csi_MYW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 整合 csi_MYW_1 = np.array((csi_MYW_O_1)) csi_MYW_1 = np.reshape(csi_MYW_1, (-1, feature_number + 1))
csi_1 = np.array((csi_LJP_1, csi_DX_1)) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) csi_1 = np.append(csi_1, csi_MYW_1, axis=0) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) # 分割特征和标签 train_feature, train_label = np.split(csi_1, (feature_number,), axis=1) test_feature, test_label = np.split(csi_LZW_1, (feature_number,), axis=1) train_feature, train_label = shuffle(train_feature, train_label, random_state=1) test_feature, test_label = shuffle(test_feature, test_label, random_state=1) # feature, label = np.split(csi_1, (feature_number,), # axis=1) # feature(150,5),label(150,1) #pylint: disable=unbalanced-tuple-unpacking #防止出现一条警告 # # 划分训练集和测试集 # train_feature, test_feature, train_label, test_label = train_test_split(feature, label, random_state=1, # test_size=0.3) return train_feature, test_feature, train_label, test_label def load_dataset(mode='train', train_feature=None, test_feature=None, train_label=None, test_label=None, BATCHSIZE=15): # 根据输入mode参数决定使用训练集,验证集还是测试 if mode == 'train': imgs = train_feature labels = train_label elif mode == 'test': imgs = test_feature labels = test_label # 获得所有图像的数量 imgs_length = len(imgs) index_list = list(range(imgs_length)) # 定义数据生成器 def data_generator(): imgs_list = [] labels_list = [] # 按照索引读取数据 for i in index_list: # 读取图像和标签,转换其尺寸和类型 img = np.reshape(imgs[i], [1, 81, 30]).astype('float32') label = np.reshape(labels[i], [1]).astype('int64') imgs_list.append(img) labels_list.append(label) # 如果当前数据缓存达到了batch size,就返回一个批次数据 if len(imgs_list) == BATCHSIZE: yield np.array(imgs_list), np.array(labels_list) # 清空数据缓存列表 imgs_list = [] labels_list = [] # 如果剩余数据的数目小于BATCHSIZE, # 则剩余数据一起构成一个大小为len(imgs_list)的mini-batch if len(imgs_list) > 0: yield np.array(imgs_list), np.array(labels_list) return data_generator # 定义模型结构 class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, 
kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv2 = nn.Conv2d(in_channels=10, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义一层全连接层,输出维度是10 self.fc1 = nn.Linear(in_features=2880, out_features=96) # 定义一层全连接层,输出维度是10 self.fc2 = nn.Linear(in_features=96, out_features=3) # 定义网络前向计算过程,卷积后紧接着使用池化层,最后使用全连接层计算最终输出 # 卷积层激活函数使用Relu,全连接层激活函数使用softmax def forward(self, inputs): x = self.conv1(inputs) x = F.relu(x) x = self.max_pool1(x) x = self.conv2(x) x = F.relu(x) x = self.max_pool2(x) x = x.view([x.shape[0], 2880]) x = self.fc1(x) x = self.fc2(x) x = F.softmax(x, dim=1) return x if __name__ == '__main__': # 仅优化算法的设置有所差别 model = CNN() model.train() params = list(model.parameters()) BATCHSIZE = 15 # 调用加载数据的函数 train_feature, test_feature, train_label, test_label = load_data('/Users/yuxiao/CSI_data/classroom_data_unit/') train_loader = load_dataset(mode='train', train_feature=train_feature, train_label=train_label, BATCHSIZE=BATCHSIZE) # 设置不同初始学习率 optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) criterion = nn.CrossEntropyLoss() EPOCH_NUM = 50 for epoch_id in range(EPOCH_NUM): acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(train_loader()): # 准备数据,变得更加简洁 image_data, label_data = data image = torch.from_numpy(image_data) label = torch.from_numpy(label_data).squeeze() # 清除梯度 optimizer.zero_grad() # 前向计算的过程 predict = model(image) # 计算损失,取一个批次样本损失的平均值 loss = criterion(predict, label) # 准确率 _, predicted = torch.max(predict, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # # 每训练了200批次的数据,打印下当前Loss的情况 # if batch_id % 2 == 0: # print("epoch: {}, batch: {}, loss is: {}, acc is: {}".format(epoch_id, batch_id, 
loss.detach().numpy(), # acc)) # 后向传播,更新参数的过程 loss.backward() optimizer.step() # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('epoch: {}, loss={}, acc={}'.format(epoch_id, avg_loss_val_mean, acc_val_mean)) # 保存模型参数 PATH = '../model/gesture_recognition_3-4.pth' torch.save(model.state_dict(), PATH) model = CNN() model.load_state_dict(torch.load(PATH)) print('test......') model.eval() test_loader = load_dataset(mode='test', test_feature=test_feature, test_label=test_label, BATCHSIZE=BATCHSIZE) acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(test_loader()): images, labels = data image = torch.from_numpy(images) label = torch.from_numpy(labels).squeeze() outputs = model(image) loss = F.cross_entropy(outputs, label) _, predicted = torch.max(outputs, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean)) # 81*3*3 # loss=0.6554504831631979, acc=0.899999996026357 # loss=0.659913182258606, acc=0.8999999999999999 # 81*30 # BatchSize = 50 epoch = 30 loss=0.6156755884488424, acc=0.9533333333333333 # BatchSize = 50 epoch = 50 loss=0.5701029102007548, acc=0.9933333333333333 # BatchSize = 15 epoch = 50 loss=0.5590923130512238, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5587734162807465, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5524427711963653, acc=1.0 # 不同人划分训练集测试集 # DX测试,其他训练 # 训练集 epoch: 49, batch: 22, loss is: 0.5519987940788269, acc is: 1.0 # 测试集 loss=0.967512023448944, acc=0.5866666666666667 # LJP测试 # epoch: 49, batch: 16, loss is: 0.7997506260871887, acc is: 0.7333333333333333 # epoch: 49, batch: 18, loss is: 0.8115711212158203, acc is: 0.7333333333333333 # epoch: 49, batch: 20, loss is: 0.8097754120826721, acc is: 
0.7333333333333333 # epoch: 49, batch: 22, loss is: 1.0042685270309448, acc is: 0.5333333333333333 # loss = 0.9660914719104767, acc = 0.6266666666666666 # epoch: 49, batch: 22, loss is: 0.5522249937057495, acc is: 1.0 # loss = 1.2215073466300965, acc = 0.33333333333333337 # LZW测试 # epoch: 49, loss = 0.551763728260994, acc = 0.9722222222222223 # loss = 1.2158974289894104, acc = 0.33333333333333337
print(datetime.datetime.now()) # * 整合所有样本,乱序,分割 # 整理数据集
random_line_split
gesture_recognition_3.4.py
# -*- encoding: utf-8 -*- ''' @File : gesture_recognition_3.4.py @Time : 2021/07/18 19:09:12 @Author : Yu Xiao 于潇 @Version : 1.0 @Contact : superyuxiao@icloud.com @License : (C)Copyright 2020-2021, Key Laboratory of University Wireless Communication Beijing University of Posts and Telecommunications @Desc : None ''' # ------------------------------ file details ------------------------------ # # 四个人,一个位置,巴特沃斯低通,PCA,九个天线对,81*9输入CNN # 使用pytorch重构 # 创建自己的数据集,但是速度特别特别特别慢 # 四个人,一个位置,巴特沃斯低通,30路子载波,一个天线对,81*30输入CNN。修改了网络,添加了一个全连接层。 # (模型不收敛可能是全连接层的输入输出分配不好,也可能是学习率的问题,目前0.001) # 按不同人划分训练集和测试集 # ------------------------------ file details ------------------------------ # # 加载相关库 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms import numpy as np from sklearn.utils import shuffle import datetime from preprocessing import mul_subcarries # 定义数据集读取器 def load_data(filepath=None): # ! 读取数据文件 # * 读取数据 feature_number = 81 * 30 # ! DX # 手势O,位置1 filepath_O_1 = filepath + 'DX/O/gresture_O_location_1_' csi_DX_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'DX/X/gresture_X_location_1_' csi_DX_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'DX/PO/gresture_PO_location_1_' csi_DX_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_DX_1 = np.array((csi_DX_O_1, csi_DX_X_1, csi_DX_PO_1)) csi_DX_1 = np.reshape(csi_DX_1, (-1, feature_number + 1)) # ! 注意修改 print(datetime.datetime.now()) # ! 
LJP # 手势O,位置1 filepath_O_1 = filepath + 'LJP/O/gresture_O_location_1_' csi_LJP_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LJP/X/gresture_X_location_1_' csi_LJP_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LJP/PO/gresture_PO_location_1_' csi_LJP_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LJP_1 = np.array((csi_LJP_O_1, csi_LJP_X_1, csi_LJP_PO_1)) csi_LJP_1 = np.reshape(csi_LJP_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! LZW # 手势O,位置1 filepath_O_1 = filepath + 'LZW/O/gresture_O_location_1_' csi_LZW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LZW/X/gresture_X_location_1_' csi_LZW_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LZW/PO/gresture_PO_location_1_' csi_LZW_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LZW_1 = np.array((csi_LZW_O_1, csi_LZW_X_1, csi_LZW_PO_1)) csi_LZW_1 = np.reshape(csi_LZW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! MYW # 手势O,位置1 # ? 
只有手势O filepath_O_1 = filepath + 'MYW/O/gresture_O_location_1_' csi_MYW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 整合 csi_MYW_1 = np.array((csi_MYW_O_1)) csi_MYW_1 = np.reshape(csi_MYW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # * 整合所有样本,乱序,分割 # 整理数据集 csi_1 = np.array((csi_LJP_1, csi_DX_1)) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) csi_1 = np.append(csi_1, csi_MYW_1, axis=0) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) # 分割特征和标签 train_feature, train_label = np.split(csi_1, (feature_number,), axis=1) test_feature, test_label = np.split(csi_LZW_1, (feature_number,), axis=1) train_feature, train_label = shuffle(train_feature, train_label, random_state=1) test_feature, test_label = shuffle(test_feature, test_label, random_state=1) # feature, label = np.split(csi_1, (feature_number,), # axis=1) # feature(150,5),label(150,1) #pylint: disable=unbalanced-tuple-unpacking #防止出现一条警告 # # 划分训练集和测试集 # train_feature, test_feature, train_label, test_label = train_test_split(feature, label, random_state=1, # test_size=0.3) return train_feature, test_feature, train_label, test_label def load_dataset(mode='train', train_feature=None, test_feature=None, train_label=None, test_label=None, BATCHSIZE=15): # 根据输入mode参数决定使用训练集,验证集还是测试 if mode == 'train': imgs = train_feature labels = train_label elif mode == 'test': imgs = test_feature labels = test_label # 获得所有图像的数量 imgs_length = len(imgs) index_list = list(range(imgs_length)) # 定义数据生成器 def data_generator(): imgs_list = [] labels_list = [] # 按照索引读取数据 for i in index_list: # 读取图像和标签,转换其尺寸和类型 img = np.reshape(imgs[i], [1, 81, 30]).astype('float32') label = np.reshape(labels[i], [1]).astype('int64') imgs_list.append(img) labels_list.append(label) # 如果当前数据缓存达到了batch size,就返回一个批次数据 if len(imgs_list) == BATCHSIZE: yield np.array(imgs_list), np.array(labels_list) # 清空数据缓存列表 imgs_list = [] labels_list = [] # 如果剩余数据的数目小于BATCHSIZE, # 则剩余数据一起构成一个大小为l
) # 定义一层全连接层,输出维度是10 self.fc2 = nn.Linear(in_features=96, out_features=3) # 定义网络前向计算过程,卷积后紧接着使用池化层,最后使用全连接层计算最终输出 # 卷积层激活函数使用Relu,全连接层激活函数使用softmax def forward(self, inputs): x = self.conv1(inputs) x = F.relu(x) x = self.max_pool1(x) x = self.conv2(x) x = F.relu(x) x = self.max_pool2(x) x = x.view([x.shape[0], 2880]) x = self.fc1(x) x = self.fc2(x) x = F.softmax(x, dim=1) return x if __name__ == '__main__': # 仅优化算法的设置有所差别 model = CNN() model.train() params = list(model.parameters()) BATCHSIZE = 15 # 调用加载数据的函数 train_feature, test_feature, train_label, test_label = load_data('/Users/yuxiao/CSI_data/classroom_data_unit/') train_loader = load_dataset(mode='train', train_feature=train_feature, train_label=train_label, BATCHSIZE=BATCHSIZE) # 设置不同初始学习率 optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) criterion = nn.CrossEntropyLoss() EPOCH_NUM = 50 for epoch_id in range(EPOCH_NUM): acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(train_loader()): # 准备数据,变得更加简洁 image_data, label_data = data image = torch.from_numpy(image_data) label = torch.from_numpy(label_data).squeeze() # 清除梯度 optimizer.zero_grad() # 前向计算的过程 predict = model(image) # 计算损失,取一个批次样本损失的平均值 loss = criterion(predict, label) # 准确率 _, predicted = torch.max(predict, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # # 每训练了200批次的数据,打印下当前Loss的情况 # if batch_id % 2 == 0: # print("epoch: {}, batch: {}, loss is: {}, acc is: {}".format(epoch_id, batch_id, loss.detach().numpy(), # acc)) # 后向传播,更新参数的过程 loss.backward() optimizer.step() # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('epoch: {}, loss={}, acc={}'.format(epoch_id, avg_loss_val_mean, acc_val_mean)) # 保存模型参数 PATH = '../model/gesture_recognition_3-4.pth' torch.save(model.state_dict(), PATH) model = CNN() model.load_state_dict(torch.load(PATH)) print('test......') model.eval() test_loader = 
load_dataset(mode='test', test_feature=test_feature, test_label=test_label, BATCHSIZE=BATCHSIZE) acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(test_loader()): images, labels = data image = torch.from_numpy(images) label = torch.from_numpy(labels).squeeze() outputs = model(image) loss = F.cross_entropy(outputs, label) _, predicted = torch.max(outputs, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean)) # 81*3*3 # loss=0.6554504831631979, acc=0.899999996026357 # loss=0.659913182258606, acc=0.8999999999999999 # 81*30 # BatchSize = 50 epoch = 30 loss=0.6156755884488424, acc=0.9533333333333333 # BatchSize = 50 epoch = 50 loss=0.5701029102007548, acc=0.9933333333333333 # BatchSize = 15 epoch = 50 loss=0.5590923130512238, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5587734162807465, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5524427711963653, acc=1.0 # 不同人划分训练集测试集 # DX测试,其他训练 # 训练集 epoch: 49, batch: 22, loss is: 0.5519987940788269, acc is: 1.0 # 测试集 loss=0.967512023448944, acc=0.5866666666666667 # LJP测试 # epoch: 49, batch: 16, loss is: 0.7997506260871887, acc is: 0.7333333333333333 # epoch: 49, batch: 18, loss is: 0.8115711212158203, acc is: 0.7333333333333333 # epoch: 49, batch: 20, loss is: 0.8097754120826721, acc is: 0.7333333333333333 # epoch: 49, batch: 22, loss is: 1.0042685270309448, acc is: 0.5333333333333333 # loss = 0.9660914719104767, acc = 0.6266666666666666 # epoch: 49, batch: 22, loss is: 0.5522249937057495, acc is: 1.0 # loss = 1.2215073466300965, acc = 0.33333333333333337 # LZW测试 # epoch: 49, loss = 0.551763728260994, acc = 0.9722222222222223 # loss = 1.2158974289894104, acc = 0.33333333333333337
en(imgs_list)的mini-batch if len(imgs_list) > 0: yield np.array(imgs_list), np.array(labels_list) return data_generator # 定义模型结构 class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv2 = nn.Conv2d(in_channels=10, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义一层全连接层,输出维度是10 self.fc1 = nn.Linear(in_features=2880, out_features=96
identifier_body
gesture_recognition_3.4.py
# -*- encoding: utf-8 -*- ''' @File : gesture_recognition_3.4.py @Time : 2021/07/18 19:09:12 @Author : Yu Xiao 于潇 @Version : 1.0 @Contact : superyuxiao@icloud.com @License : (C)Copyright 2020-2021, Key Laboratory of University Wireless Communication Beijing University of Posts and Telecommunications @Desc : None ''' # ------------------------------ file details ------------------------------ # # 四个人,一个位置,巴特沃斯低通,PCA,九个天线对,81*9输入CNN # 使用pytorch重构 # 创建自己的数据集,但是速度特别特别特别慢 # 四个人,一个位置,巴特沃斯低通,30路子载波,一个天线对,81*30输入CNN。修改了网络,添加了一个全连接层。 # (模型不收敛可能是全连接层的输入输出分配不好,也可能是学习率的问题,目前0.001) # 按不同人划分训练集和测试集 # ------------------------------ file details ------------------------------ # # 加载相关库 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms import numpy as np from sklearn.utils import shuffle import datetime from preprocessing import mul_subcarries # 定义数据集读取器 def load_data(filepath=None): # ! 读取数据文件 # * 读取数据 feature_number = 81 * 30 # ! DX # 手势O,位置1 filepath_O_1 = filepath + 'DX/O/gresture_O_location_1_' csi_DX_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'DX/X/gresture_X_location_1_' csi_DX_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'DX/PO/gresture_PO_location_1_' csi_DX_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_DX_1 = np.array((csi_DX_O_1, csi_DX_X_1, csi_DX_PO_1)) csi_DX_1 = np.reshape(csi_DX_1, (-1, feature_number + 1)) # ! 注意修改 print(datetime.datetime.now()) # ! 
LJP # 手势O,位置1 filepath_O_1 = filepath + 'LJP/O/gresture_O_location_1_' csi_LJP_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LJP/X/gresture_X_location_1_' csi_LJP_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LJP/PO/gresture_PO_location_1_' csi_LJP_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LJP_1 = np.array((csi_LJP_O_1, csi_LJP_X_1, csi_LJP_PO_1)) csi_LJP_1 = np.reshape(csi_LJP_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! LZW # 手势O,位置1 filepath_O_1 = filepath + 'LZW/O/gresture_O_location_1_' csi_LZW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LZW/X/gresture_X_location_1_' csi_LZW_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LZW/PO/gresture_PO_location_1_' csi_LZW_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LZW_1 = np.array((csi_LZW_O_1, csi_LZW_X_1, csi_LZW_PO_1)) csi_LZW_1 = np.reshape(csi_LZW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! MYW # 手势O,位置1 # ? 
只有手势O filepath_O_1 = filepath + 'MYW/O/gresture_O_location_1_' csi_MYW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 整合 csi_MYW_1 = np.array((csi_MYW_O_1)) csi_MYW_1 = np.reshape(csi_MYW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # * 整合所有样本,乱序,分割 # 整理数据集 csi_1 = np.array((csi_LJP_1, csi_DX_1)) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) csi_1 = np.append(csi_1, csi_MYW_1, axis=0) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) # 分割特征和标签 train_feature, train_label = np.split(csi_1, (feature_number,), axis=1) test_feature, test_label = np.split(csi_LZW_1, (feature_number,), axis=1) train_feature, train_label = shuffle(train_feature, train_label, random_state=1) test_feature, test_label = shuffle(test_feature, test_label, random_state=1) # feature, label = np.split(csi_1, (feature_number,), # axis=1) # feature(150,5),label(150,1) #pylint: disable=unbalanced-tuple-unpacking #防止出现一条警告 # # 划分训练集和测试集 # train_feature, test_feature, train_label, test_label = train_test_split(feature, label, random_state=1, # test_size=0.3) return train_feature, test_feature, train_label, test_label def load_dataset(mode='train', train_feature=None, test_feature=None, train_label=None, test_label=None, BATCHSIZE=15): # 根据输入mode参数决定使用训练集,验证集还是测试 if mode == 'train': imgs = train_feature labels = train_label elif mode == 'test': imgs = test_feature labels = test_label # 获得所有图像的数量 imgs_length = len(imgs) index_list = list(range(imgs_length)) # 定义数据生成器 def data_generator(): imgs_list = [] labels_list = [] # 按照索引读取数据 for i in index_list: # 读取图像和标签,转换其尺寸和类型 img = np.reshape(imgs[i], [1, 81, 30]).astype('float32') label = np.reshape(labels[i], [1]).astype('int64') imgs_list.append(img) labels_list.append(label) # 如果当前数据缓存达到了batch size,就返回一个批次数据 if len(imgs_list) == BATCHSIZE
.array(labels_list) # 清空数据缓存列表 imgs_list = [] labels_list = [] # 如果剩余数据的数目小于BATCHSIZE, # 则剩余数据一起构成一个大小为len(imgs_list)的mini-batch if len(imgs_list) > 0: yield np.array(imgs_list), np.array(labels_list) return data_generator # 定义模型结构 class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv2 = nn.Conv2d(in_channels=10, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义一层全连接层,输出维度是10 self.fc1 = nn.Linear(in_features=2880, out_features=96) # 定义一层全连接层,输出维度是10 self.fc2 = nn.Linear(in_features=96, out_features=3) # 定义网络前向计算过程,卷积后紧接着使用池化层,最后使用全连接层计算最终输出 # 卷积层激活函数使用Relu,全连接层激活函数使用softmax def forward(self, inputs): x = self.conv1(inputs) x = F.relu(x) x = self.max_pool1(x) x = self.conv2(x) x = F.relu(x) x = self.max_pool2(x) x = x.view([x.shape[0], 2880]) x = self.fc1(x) x = self.fc2(x) x = F.softmax(x, dim=1) return x if __name__ == '__main__': # 仅优化算法的设置有所差别 model = CNN() model.train() params = list(model.parameters()) BATCHSIZE = 15 # 调用加载数据的函数 train_feature, test_feature, train_label, test_label = load_data('/Users/yuxiao/CSI_data/classroom_data_unit/') train_loader = load_dataset(mode='train', train_feature=train_feature, train_label=train_label, BATCHSIZE=BATCHSIZE) # 设置不同初始学习率 optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) criterion = nn.CrossEntropyLoss() EPOCH_NUM = 50 for epoch_id in range(EPOCH_NUM): acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(train_loader()): # 准备数据,变得更加简洁 image_data, label_data = data image = torch.from_numpy(image_data) label = 
torch.from_numpy(label_data).squeeze() # 清除梯度 optimizer.zero_grad() # 前向计算的过程 predict = model(image) # 计算损失,取一个批次样本损失的平均值 loss = criterion(predict, label) # 准确率 _, predicted = torch.max(predict, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # # 每训练了200批次的数据,打印下当前Loss的情况 # if batch_id % 2 == 0: # print("epoch: {}, batch: {}, loss is: {}, acc is: {}".format(epoch_id, batch_id, loss.detach().numpy(), # acc)) # 后向传播,更新参数的过程 loss.backward() optimizer.step() # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('epoch: {}, loss={}, acc={}'.format(epoch_id, avg_loss_val_mean, acc_val_mean)) # 保存模型参数 PATH = '../model/gesture_recognition_3-4.pth' torch.save(model.state_dict(), PATH) model = CNN() model.load_state_dict(torch.load(PATH)) print('test......') model.eval() test_loader = load_dataset(mode='test', test_feature=test_feature, test_label=test_label, BATCHSIZE=BATCHSIZE) acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(test_loader()): images, labels = data image = torch.from_numpy(images) label = torch.from_numpy(labels).squeeze() outputs = model(image) loss = F.cross_entropy(outputs, label) _, predicted = torch.max(outputs, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean)) # 81*3*3 # loss=0.6554504831631979, acc=0.899999996026357 # loss=0.659913182258606, acc=0.8999999999999999 # 81*30 # BatchSize = 50 epoch = 30 loss=0.6156755884488424, acc=0.9533333333333333 # BatchSize = 50 epoch = 50 loss=0.5701029102007548, acc=0.9933333333333333 # BatchSize = 15 epoch = 50 loss=0.5590923130512238, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5587734162807465, 
acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5524427711963653, acc=1.0 # 不同人划分训练集测试集 # DX测试,其他训练 # 训练集 epoch: 49, batch: 22, loss is: 0.5519987940788269, acc is: 1.0 # 测试集 loss=0.967512023448944, acc=0.5866666666666667 # LJP测试 # epoch: 49, batch: 16, loss is: 0.7997506260871887, acc is: 0.7333333333333333 # epoch: 49, batch: 18, loss is: 0.8115711212158203, acc is: 0.7333333333333333 # epoch: 49, batch: 20, loss is: 0.8097754120826721, acc is: 0.7333333333333333 # epoch: 49, batch: 22, loss is: 1.0042685270309448, acc is: 0.5333333333333333 # loss = 0.9660914719104767, acc = 0.6266666666666666 # epoch: 49, batch: 22, loss is: 0.5522249937057495, acc is: 1.0 # loss = 1.2215073466300965, acc = 0.33333333333333337 # LZW测试 # epoch: 49, loss = 0.551763728260994, acc = 0.9722222222222223 # loss = 1.2158974289894104, acc = 0.33333333333333337
: yield np.array(imgs_list), np
conditional_block
gesture_recognition_3.4.py
# -*- encoding: utf-8 -*- ''' @File : gesture_recognition_3.4.py @Time : 2021/07/18 19:09:12 @Author : Yu Xiao 于潇 @Version : 1.0 @Contact : superyuxiao@icloud.com @License : (C)Copyright 2020-2021, Key Laboratory of University Wireless Communication Beijing University of Posts and Telecommunications @Desc : None ''' # ------------------------------ file details ------------------------------ # # 四个人,一个位置,巴特沃斯低通,PCA,九个天线对,81*9输入CNN # 使用pytorch重构 # 创建自己的数据集,但是速度特别特别特别慢 # 四个人,一个位置,巴特沃斯低通,30路子载波,一个天线对,81*30输入CNN。修改了网络,添加了一个全连接层。 # (模型不收敛可能是全连接层的输入输出分配不好,也可能是学习率的问题,目前0.001) # 按不同人划分训练集和测试集 # ------------------------------ file details ------------------------------ # # 加载相关库 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms import numpy as np from sklearn.utils import shuffle import datetime from preprocessing import mul_subcarries # 定义数据集读取器 def load_data(filepath=None): # ! 读取数据文件 # * 读取数据 feature_number = 81 * 30 # ! DX # 手势O,位置1 filepath_O_1 = filepath + 'DX/O/gresture_O_location_1_' csi_DX_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'DX/X/gresture_X_location_1_' csi_DX_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'DX/PO/gresture_PO_location_1_' csi_DX_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_DX_1 = np.array((csi_DX_O_1, csi_DX_X_1, csi_DX_PO_1)) csi_DX_1 = np.reshape(csi_DX_1, (-1, feature_number + 1)) # ! 注意修改 print(datetime.datetime.now()) # ! 
LJP # 手势O,位置1 filepath_O_1 = filepath + 'LJP/O/gresture_O_location_1_' csi_LJP_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LJP/X/gresture_X_location_1_' csi_LJP_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LJP/PO/gresture_PO_location_1_' csi_LJP_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LJP_1 = np.array((csi_LJP_O_1, csi_LJP_X_1, csi_LJP_PO_1)) csi_LJP_1 = np.reshape(csi_LJP_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! LZW # 手势O,位置1 filepath_O_1 = filepath + 'LZW/O/gresture_O_location_1_' csi_LZW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 手势X,位置1 filepath_X_1 = filepath + 'LZW/X/gresture_X_location_1_' csi_LZW_X_1 = mul_subcarries(filepath_X_1, feature_number, 1) # 手势PO,位置1 filepath_PO_1 = filepath + 'LZW/PO/gresture_PO_location_1_' csi_LZW_PO_1 = mul_subcarries(filepath_PO_1, feature_number, 2) # 整合 csi_LZW_1 = np.array((csi_LZW_O_1, csi_LZW_X_1, csi_LZW_PO_1)) csi_LZW_1 = np.reshape(csi_LZW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # ! MYW # 手势O,位置1 # ? 
只有手势O filepath_O_1 = filepath + 'MYW/O/gresture_O_location_1_' csi_MYW_O_1 = mul_subcarries(filepath_O_1, feature_number, 0) # 整合 csi_MYW_1 = np.array((csi_MYW_O_1)) csi_MYW_1 = np.reshape(csi_MYW_1, (-1, feature_number + 1)) print(datetime.datetime.now()) # * 整合所有样本,乱序,分割 # 整理数据集 csi_1 = np.array((csi_LJP_1, csi_DX_1)) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) csi_1 = np.append(csi_1, csi_MYW_1, axis=0) csi_1 = np.reshape(csi_1, (-1, feature_number + 1)) # 分割特征和标签 train_feature, train_label = np.split(csi_1, (feature_number,), axis=1) test_feature, test_label = np.split(csi_LZW_1, (feature_number,), axis=1) train_feature, train_label = shuffle(train_feature, train_label, random_state=1) test_feature, test_label = shuffle(test_feature, test_label, random_state=1) # feature, label = np.split(csi_1, (feature_number,), # axis=1) # feature(150,5),label(150,1) #pylint: disable=unbalanced-tuple-unpacking #防止出现一条警告 # # 划分训练集和测试集 # train_feature, test_feature, train_label, test_label = train_test_split(feature, label, random_state=1, # test_size=0.3) return train_feature, test_feature, train_label, test_label def load_dataset(mode='train', train_feature=None, test_feature=None, train_label=None, test_label=None, BATCHSIZE=15): # 根据输入mode参数决定使用训练集,验证集还是测试 if mode == 'train': imgs = train_feature labels = train_label elif mode == 'test': imgs = test_feature labels = test_label # 获得所有图像的数量 imgs_length = len(imgs) index_list = list(range(imgs_length)) # 定义数据生成器 def data_generator(): imgs_list = [] labels_list = [] # 按照索引读取数据 for i in index_list: # 读取图像和标签,转换其尺寸和类型 img = np.reshape(imgs[i], [1, 81, 30]).astype('float32') label = np.reshape(labels[i], [1]).astype('int64') imgs_list.append(img) labels_list.append(label) # 如果当前数据缓存达到了batch size,就返回一个批次数据 if len(imgs_list) == BATCHSIZE: yield np.array(imgs_list), np.array(labels_list) # 清空数据缓存列表 imgs_list = [] labels_list = [] # 如果剩余数据的数目小于BATCHSIZE
余数据一起构成一个大小为len(imgs_list)的mini-batch if len(imgs_list) > 0: yield np.array(imgs_list), np.array(labels_list) return data_generator # 定义模型结构 class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义卷积层,输出特征通道out_channels设置为20,卷积核的大小kernel_size为5,卷积步长stride=1,padding=2 self.conv2 = nn.Conv2d(in_channels=10, out_channels=10, kernel_size=5, stride=1, padding=5) # 定义池化层,池化核的大小kernel_size为2,池化步长为2 self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 定义一层全连接层,输出维度是10 self.fc1 = nn.Linear(in_features=2880, out_features=96) # 定义一层全连接层,输出维度是10 self.fc2 = nn.Linear(in_features=96, out_features=3) # 定义网络前向计算过程,卷积后紧接着使用池化层,最后使用全连接层计算最终输出 # 卷积层激活函数使用Relu,全连接层激活函数使用softmax def forward(self, inputs): x = self.conv1(inputs) x = F.relu(x) x = self.max_pool1(x) x = self.conv2(x) x = F.relu(x) x = self.max_pool2(x) x = x.view([x.shape[0], 2880]) x = self.fc1(x) x = self.fc2(x) x = F.softmax(x, dim=1) return x if __name__ == '__main__': # 仅优化算法的设置有所差别 model = CNN() model.train() params = list(model.parameters()) BATCHSIZE = 15 # 调用加载数据的函数 train_feature, test_feature, train_label, test_label = load_data('/Users/yuxiao/CSI_data/classroom_data_unit/') train_loader = load_dataset(mode='train', train_feature=train_feature, train_label=train_label, BATCHSIZE=BATCHSIZE) # 设置不同初始学习率 optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) criterion = nn.CrossEntropyLoss() EPOCH_NUM = 50 for epoch_id in range(EPOCH_NUM): acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(train_loader()): # 准备数据,变得更加简洁 image_data, label_data = data image = torch.from_numpy(image_data) label = torch.from_numpy(label_data).squeeze() # 清除梯度 optimizer.zero_grad() # 前向计算的过程 predict = model(image) # 计算损失,取一个批次样本损失的平均值 loss = 
criterion(predict, label) # 准确率 _, predicted = torch.max(predict, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # # 每训练了200批次的数据,打印下当前Loss的情况 # if batch_id % 2 == 0: # print("epoch: {}, batch: {}, loss is: {}, acc is: {}".format(epoch_id, batch_id, loss.detach().numpy(), # acc)) # 后向传播,更新参数的过程 loss.backward() optimizer.step() # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('epoch: {}, loss={}, acc={}'.format(epoch_id, avg_loss_val_mean, acc_val_mean)) # 保存模型参数 PATH = '../model/gesture_recognition_3-4.pth' torch.save(model.state_dict(), PATH) model = CNN() model.load_state_dict(torch.load(PATH)) print('test......') model.eval() test_loader = load_dataset(mode='test', test_feature=test_feature, test_label=test_label, BATCHSIZE=BATCHSIZE) acc_set = [] avg_loss_set = [] for batch_id, data in enumerate(test_loader()): images, labels = data image = torch.from_numpy(images) label = torch.from_numpy(labels).squeeze() outputs = model(image) loss = F.cross_entropy(outputs, label) _, predicted = torch.max(outputs, 1) acc = (predicted == label).sum().item() / BATCHSIZE acc_set.append(acc) avg_loss_set.append(float(loss.detach().numpy())) # 计算多个batch的平均损失和准确率 acc_val_mean = np.array(acc_set).mean() avg_loss_val_mean = np.array(avg_loss_set).mean() print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean)) # 81*3*3 # loss=0.6554504831631979, acc=0.899999996026357 # loss=0.659913182258606, acc=0.8999999999999999 # 81*30 # BatchSize = 50 epoch = 30 loss=0.6156755884488424, acc=0.9533333333333333 # BatchSize = 50 epoch = 50 loss=0.5701029102007548, acc=0.9933333333333333 # BatchSize = 15 epoch = 50 loss=0.5590923130512238, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5587734162807465, acc=0.9933333333333334 # BatchSize = 15 epoch = 50 loss=0.5524427711963653, acc=1.0 # 不同人划分训练集测试集 # DX测试,其他训练 # 训练集 epoch: 49, batch: 
22, loss is: 0.5519987940788269, acc is: 1.0 # 测试集 loss=0.967512023448944, acc=0.5866666666666667 # LJP测试 # epoch: 49, batch: 16, loss is: 0.7997506260871887, acc is: 0.7333333333333333 # epoch: 49, batch: 18, loss is: 0.8115711212158203, acc is: 0.7333333333333333 # epoch: 49, batch: 20, loss is: 0.8097754120826721, acc is: 0.7333333333333333 # epoch: 49, batch: 22, loss is: 1.0042685270309448, acc is: 0.5333333333333333 # loss = 0.9660914719104767, acc = 0.6266666666666666 # epoch: 49, batch: 22, loss is: 0.5522249937057495, acc is: 1.0 # loss = 1.2215073466300965, acc = 0.33333333333333337 # LZW测试 # epoch: 49, loss = 0.551763728260994, acc = 0.9722222222222223 # loss = 1.2158974289894104, acc = 0.33333333333333337
, # 则剩
identifier_name
pit.rs
//! Periodic interrupt timer (PIT) driver and futures //! //! The PIT timer channels are the most precise timers in the HAL. PIT timers run on the periodic clock //! frequency. //! //! A single hardware PIT instance has four PIT channels. Use [`new`](PIT::new()) to acquire these four //! channels. //! //! # Example //! //! Delay for 250ms using PIT channel 3. //! //! ```no_run //! use imxrt_async_hal as hal; //! use hal::ral; //! use hal::PIT; //! //! let ccm = ral::ccm::CCM::take().unwrap();
//! let (_, _, _, mut pit) = ral::pit::PIT::take() //! .map(PIT::new) //! .unwrap(); //! //! # async { //! pit.delay(250_000).await; //! # }; //! ``` use crate::ral; use core::{ future::Future, marker::PhantomPinned, pin::Pin, sync::atomic, task::{Context, Poll, Waker}, }; /// Periodic interrupt timer (PIT) /// /// See the [module-level documentation](crate::pit) for more information. #[cfg_attr(docsrs, doc(cfg(feature = "pit")))] pub struct PIT { channel: register::ChannelInstance, } impl PIT { /// Acquire four PIT channels from the RAL's PIT instance pub fn new(pit: ral::pit::Instance) -> (PIT, PIT, PIT, PIT) { ral::write_reg!(ral::pit, pit, MCR, MDIS: MDIS_0); // Reset all PIT channels // // PIT channels may be used by a systems boot ROM, or another // user. Set them to a known, good state. ral::write_reg!(ral::pit, pit, TCTRL0, 0); ral::write_reg!(ral::pit, pit, TCTRL1, 0); ral::write_reg!(ral::pit, pit, TCTRL2, 0); ral::write_reg!(ral::pit, pit, TCTRL3, 0); unsafe { cortex_m::peripheral::NVIC::unmask(crate::ral::interrupt::PIT); ( PIT { channel: register::ChannelInstance::zero(), }, PIT { channel: register::ChannelInstance::one(), }, PIT { channel: register::ChannelInstance::two(), }, PIT { channel: register::ChannelInstance::three(), }, ) } } /// Wait for the counts to elapse /// /// The elapsed time is a function of your clock selection and clock frequency. pub fn delay(&mut self, count: u32) -> Delay<'_> { Delay { channel: &mut self.channel, count, _pin: PhantomPinned, } } } static mut WAKERS: [Option<Waker>; 4] = [None, None, None, None]; /// A future that yields once the PIT timer elapses pub struct Delay<'a> { channel: &'a mut register::ChannelInstance, _pin: PhantomPinned, count: u32, } impl<'a> Future for Delay<'a> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let count = self.count; // Safety: future is safely Unpin; only exposed as !Unpin, just in case. 
let this = unsafe { Pin::into_inner_unchecked(self) }; poll_delay(&mut this.channel, cx, count) } } fn poll_delay( channel: &mut register::ChannelInstance, cx: &mut Context<'_>, count: u32, ) -> Poll<()> { if ral::read_reg!(register, channel, TFLG, TIF == 1) { // Complete! W1C ral::write_reg!(register, channel, TFLG, TIF: 1); Poll::Ready(()) } else if ral::read_reg!(register, channel, TCTRL) != 0 { // We're active; do nothing Poll::Pending } else { // Neither complete nor active; prepare to run ral::write_reg!(register, channel, LDVAL, count); unsafe { WAKERS[channel.index()] = Some(cx.waker().clone()); } atomic::compiler_fence(atomic::Ordering::SeqCst); ral::modify_reg!(register, channel, TCTRL, TIE: 1); ral::modify_reg!(register, channel, TCTRL, TEN: 1); Poll::Pending } } impl<'a> Drop for Delay<'a> { fn drop(&mut self) { poll_cancel(&mut self.channel); } } fn poll_cancel(channel: &mut register::ChannelInstance) { ral::write_reg!(register, channel, TCTRL, 0); } interrupts! { handler!{unsafe fn PIT() { use register::ChannelInstance; [ ChannelInstance::zero(), ChannelInstance::one(), ChannelInstance::two(), ChannelInstance::three(), ] .iter_mut() .zip(WAKERS.iter_mut()) .filter(|(channel, _)| ral::read_reg!(register, channel, TFLG, TIF == 1)) .for_each(|(channel, waker)| { ral::write_reg!(register, channel, TCTRL, 0); if let Some(waker) = waker.take() { waker.wake(); } }); }} } /// The auto-generated RAL API is cumbersome. This is a macro-compatible API that makes it /// easier to work with. 
/// /// The approach here is to /// /// - take the RAL flags, and remove the channel number (copy-paste from RAL) /// - expose a 'Channel' as a collection of PIT channel registers (copy-paste from RAL) mod register { #![allow(unused, non_snake_case, non_upper_case_globals)] // Compatibility with RAL use crate::ral::{RORegister, RWRegister}; #[repr(C)] pub struct ChannelRegisterBlock { /// Timer Load Value Register pub LDVAL: RWRegister<u32>, /// Current Timer Value Register pub CVAL: RORegister<u32>, /// Timer Control Register pub TCTRL: RWRegister<u32>, /// Timer Flag Register pub TFLG: RWRegister<u32>, } pub struct ChannelInstance { addr: u32, idx: usize, _marker: ::core::marker::PhantomData<*const ChannelRegisterBlock>, } impl ::core::ops::Deref for ChannelInstance { type Target = ChannelRegisterBlock; #[inline(always)] fn deref(&self) -> &ChannelRegisterBlock { unsafe { &*(self.addr as *const _) } } } const PIT_BASE_ADDRESS: u32 = 0x4008_4000; const PIT_CHANNEL_0_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x100; const PIT_CHANNEL_1_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x110; const PIT_CHANNEL_2_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x120; const PIT_CHANNEL_3_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x130; impl ChannelInstance { const unsafe fn new(addr: u32, idx: usize) -> Self { ChannelInstance { addr, idx, _marker: core::marker::PhantomData, } } pub const fn index(&self) -> usize { self.idx } pub const unsafe fn zero() -> Self { Self::new(PIT_CHANNEL_0_ADDRESS, 0) } pub const unsafe fn one() -> Self { Self::new(PIT_CHANNEL_1_ADDRESS, 1) } pub const unsafe fn two() -> Self { Self::new(PIT_CHANNEL_2_ADDRESS, 2) } pub const unsafe fn three() -> Self { Self::new(PIT_CHANNEL_3_ADDRESS, 3) } } /// Timer Load Value Register pub mod LDVAL { /// Timer Start Value pub mod TSV { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// 
Read-write values (empty) pub mod RW {} } } /// Current Timer Value Register pub mod CVAL { /// Current Timer Value pub mod TVL { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// Timer Control Register pub mod TCTRL { /// Timer Enable pub mod TEN { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer n is disabled. pub const TEN_0: u32 = 0b0; /// 0b1: Timer n is enabled. pub const TEN_1: u32 = 0b1; } } /// Timer Interrupt Enable pub mod TIE { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Interrupt requests from Timer n are disabled. pub const TIE_0: u32 = 0b0; /// 0b1: Interrupt will be requested whenever TIF is set. pub const TIE_1: u32 = 0b1; } } /// Chain Mode pub mod CHN { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer is not chained. pub const CHN_0: u32 = 0b0; /// 0b1: Timer is chained to previous timer. For example, for Channel 2, if this field is set, Timer 2 is chained to Timer 1. 
pub const CHN_1: u32 = 0b1; } } } /// Timer Flag Register pub mod TFLG { /// Timer Interrupt Flag pub mod TIF { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timeout has not yet occurred. pub const TIF_0: u32 = 0b0; /// 0b1: Timeout has occurred. pub const TIF_1: u32 = 0b1; } } } }
//! // Select 24MHz crystal oscillator, divide by 24 == 1MHz clock //! ral::modify_reg!(ral::ccm, ccm, CSCMR1, PERCLK_PODF: DIVIDE_24, PERCLK_CLK_SEL: 1); //! // Enable PIT clock gate //! ral::modify_reg!(ral::ccm, ccm, CCGR1, CG6: 0b11);
random_line_split
pit.rs
//! Periodic interrupt timer (PIT) driver and futures //! //! The PIT timer channels are the most precise timers in the HAL. PIT timers run on the periodic clock //! frequency. //! //! A single hardware PIT instance has four PIT channels. Use [`new`](PIT::new()) to acquire these four //! channels. //! //! # Example //! //! Delay for 250ms using PIT channel 3. //! //! ```no_run //! use imxrt_async_hal as hal; //! use hal::ral; //! use hal::PIT; //! //! let ccm = ral::ccm::CCM::take().unwrap(); //! // Select 24MHz crystal oscillator, divide by 24 == 1MHz clock //! ral::modify_reg!(ral::ccm, ccm, CSCMR1, PERCLK_PODF: DIVIDE_24, PERCLK_CLK_SEL: 1); //! // Enable PIT clock gate //! ral::modify_reg!(ral::ccm, ccm, CCGR1, CG6: 0b11); //! let (_, _, _, mut pit) = ral::pit::PIT::take() //! .map(PIT::new) //! .unwrap(); //! //! # async { //! pit.delay(250_000).await; //! # }; //! ``` use crate::ral; use core::{ future::Future, marker::PhantomPinned, pin::Pin, sync::atomic, task::{Context, Poll, Waker}, }; /// Periodic interrupt timer (PIT) /// /// See the [module-level documentation](crate::pit) for more information. #[cfg_attr(docsrs, doc(cfg(feature = "pit")))] pub struct PIT { channel: register::ChannelInstance, } impl PIT { /// Acquire four PIT channels from the RAL's PIT instance pub fn new(pit: ral::pit::Instance) -> (PIT, PIT, PIT, PIT) { ral::write_reg!(ral::pit, pit, MCR, MDIS: MDIS_0); // Reset all PIT channels // // PIT channels may be used by a systems boot ROM, or another // user. Set them to a known, good state. 
ral::write_reg!(ral::pit, pit, TCTRL0, 0); ral::write_reg!(ral::pit, pit, TCTRL1, 0); ral::write_reg!(ral::pit, pit, TCTRL2, 0); ral::write_reg!(ral::pit, pit, TCTRL3, 0); unsafe { cortex_m::peripheral::NVIC::unmask(crate::ral::interrupt::PIT); ( PIT { channel: register::ChannelInstance::zero(), }, PIT { channel: register::ChannelInstance::one(), }, PIT { channel: register::ChannelInstance::two(), }, PIT { channel: register::ChannelInstance::three(), }, ) } } /// Wait for the counts to elapse /// /// The elapsed time is a function of your clock selection and clock frequency. pub fn delay(&mut self, count: u32) -> Delay<'_> { Delay { channel: &mut self.channel, count, _pin: PhantomPinned, } } } static mut WAKERS: [Option<Waker>; 4] = [None, None, None, None]; /// A future that yields once the PIT timer elapses pub struct Delay<'a> { channel: &'a mut register::ChannelInstance, _pin: PhantomPinned, count: u32, } impl<'a> Future for Delay<'a> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let count = self.count; // Safety: future is safely Unpin; only exposed as !Unpin, just in case. let this = unsafe { Pin::into_inner_unchecked(self) }; poll_delay(&mut this.channel, cx, count) } } fn poll_delay( channel: &mut register::ChannelInstance, cx: &mut Context<'_>, count: u32, ) -> Poll<()> { if ral::read_reg!(register, channel, TFLG, TIF == 1) { // Complete! 
W1C ral::write_reg!(register, channel, TFLG, TIF: 1); Poll::Ready(()) } else if ral::read_reg!(register, channel, TCTRL) != 0 { // We're active; do nothing Poll::Pending } else { // Neither complete nor active; prepare to run ral::write_reg!(register, channel, LDVAL, count); unsafe { WAKERS[channel.index()] = Some(cx.waker().clone()); } atomic::compiler_fence(atomic::Ordering::SeqCst); ral::modify_reg!(register, channel, TCTRL, TIE: 1); ral::modify_reg!(register, channel, TCTRL, TEN: 1); Poll::Pending } } impl<'a> Drop for Delay<'a> { fn drop(&mut self) { poll_cancel(&mut self.channel); } } fn poll_cancel(channel: &mut register::ChannelInstance) { ral::write_reg!(register, channel, TCTRL, 0); } interrupts! { handler!{unsafe fn PIT() { use register::ChannelInstance; [ ChannelInstance::zero(), ChannelInstance::one(), ChannelInstance::two(), ChannelInstance::three(), ] .iter_mut() .zip(WAKERS.iter_mut()) .filter(|(channel, _)| ral::read_reg!(register, channel, TFLG, TIF == 1)) .for_each(|(channel, waker)| { ral::write_reg!(register, channel, TCTRL, 0); if let Some(waker) = waker.take() { waker.wake(); } }); }} } /// The auto-generated RAL API is cumbersome. This is a macro-compatible API that makes it /// easier to work with. 
/// /// The approach here is to /// /// - take the RAL flags, and remove the channel number (copy-paste from RAL) /// - expose a 'Channel' as a collection of PIT channel registers (copy-paste from RAL) mod register { #![allow(unused, non_snake_case, non_upper_case_globals)] // Compatibility with RAL use crate::ral::{RORegister, RWRegister}; #[repr(C)] pub struct ChannelRegisterBlock { /// Timer Load Value Register pub LDVAL: RWRegister<u32>, /// Current Timer Value Register pub CVAL: RORegister<u32>, /// Timer Control Register pub TCTRL: RWRegister<u32>, /// Timer Flag Register pub TFLG: RWRegister<u32>, } pub struct ChannelInstance { addr: u32, idx: usize, _marker: ::core::marker::PhantomData<*const ChannelRegisterBlock>, } impl ::core::ops::Deref for ChannelInstance { type Target = ChannelRegisterBlock; #[inline(always)] fn deref(&self) -> &ChannelRegisterBlock { unsafe { &*(self.addr as *const _) } } } const PIT_BASE_ADDRESS: u32 = 0x4008_4000; const PIT_CHANNEL_0_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x100; const PIT_CHANNEL_1_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x110; const PIT_CHANNEL_2_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x120; const PIT_CHANNEL_3_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x130; impl ChannelInstance { const unsafe fn new(addr: u32, idx: usize) -> Self { ChannelInstance { addr, idx, _marker: core::marker::PhantomData, } } pub const fn index(&self) -> usize { self.idx } pub const unsafe fn zero() -> Self { Self::new(PIT_CHANNEL_0_ADDRESS, 0) } pub const unsafe fn one() -> Self { Self::new(PIT_CHANNEL_1_ADDRESS, 1) } pub const unsafe fn two() -> Self { Self::new(PIT_CHANNEL_2_ADDRESS, 2) } pub const unsafe fn
() -> Self { Self::new(PIT_CHANNEL_3_ADDRESS, 3) } } /// Timer Load Value Register pub mod LDVAL { /// Timer Start Value pub mod TSV { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// Current Timer Value Register pub mod CVAL { /// Current Timer Value pub mod TVL { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// Timer Control Register pub mod TCTRL { /// Timer Enable pub mod TEN { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer n is disabled. pub const TEN_0: u32 = 0b0; /// 0b1: Timer n is enabled. pub const TEN_1: u32 = 0b1; } } /// Timer Interrupt Enable pub mod TIE { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Interrupt requests from Timer n are disabled. pub const TIE_0: u32 = 0b0; /// 0b1: Interrupt will be requested whenever TIF is set. pub const TIE_1: u32 = 0b1; } } /// Chain Mode pub mod CHN { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer is not chained. pub const CHN_0: u32 = 0b0; /// 0b1: Timer is chained to previous timer. 
For example, for Channel 2, if this field is set, Timer 2 is chained to Timer 1. pub const CHN_1: u32 = 0b1; } } } /// Timer Flag Register pub mod TFLG { /// Timer Interrupt Flag pub mod TIF { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timeout has not yet occurred. pub const TIF_0: u32 = 0b0; /// 0b1: Timeout has occurred. pub const TIF_1: u32 = 0b1; } } } }
three
identifier_name
pit.rs
//! Periodic interrupt timer (PIT) driver and futures //! //! The PIT timer channels are the most precise timers in the HAL. PIT timers run on the periodic clock //! frequency. //! //! A single hardware PIT instance has four PIT channels. Use [`new`](PIT::new()) to acquire these four //! channels. //! //! # Example //! //! Delay for 250ms using PIT channel 3. //! //! ```no_run //! use imxrt_async_hal as hal; //! use hal::ral; //! use hal::PIT; //! //! let ccm = ral::ccm::CCM::take().unwrap(); //! // Select 24MHz crystal oscillator, divide by 24 == 1MHz clock //! ral::modify_reg!(ral::ccm, ccm, CSCMR1, PERCLK_PODF: DIVIDE_24, PERCLK_CLK_SEL: 1); //! // Enable PIT clock gate //! ral::modify_reg!(ral::ccm, ccm, CCGR1, CG6: 0b11); //! let (_, _, _, mut pit) = ral::pit::PIT::take() //! .map(PIT::new) //! .unwrap(); //! //! # async { //! pit.delay(250_000).await; //! # }; //! ``` use crate::ral; use core::{ future::Future, marker::PhantomPinned, pin::Pin, sync::atomic, task::{Context, Poll, Waker}, }; /// Periodic interrupt timer (PIT) /// /// See the [module-level documentation](crate::pit) for more information. #[cfg_attr(docsrs, doc(cfg(feature = "pit")))] pub struct PIT { channel: register::ChannelInstance, } impl PIT { /// Acquire four PIT channels from the RAL's PIT instance pub fn new(pit: ral::pit::Instance) -> (PIT, PIT, PIT, PIT) { ral::write_reg!(ral::pit, pit, MCR, MDIS: MDIS_0); // Reset all PIT channels // // PIT channels may be used by a systems boot ROM, or another // user. Set them to a known, good state. 
ral::write_reg!(ral::pit, pit, TCTRL0, 0); ral::write_reg!(ral::pit, pit, TCTRL1, 0); ral::write_reg!(ral::pit, pit, TCTRL2, 0); ral::write_reg!(ral::pit, pit, TCTRL3, 0); unsafe { cortex_m::peripheral::NVIC::unmask(crate::ral::interrupt::PIT); ( PIT { channel: register::ChannelInstance::zero(), }, PIT { channel: register::ChannelInstance::one(), }, PIT { channel: register::ChannelInstance::two(), }, PIT { channel: register::ChannelInstance::three(), }, ) } } /// Wait for the counts to elapse /// /// The elapsed time is a function of your clock selection and clock frequency. pub fn delay(&mut self, count: u32) -> Delay<'_> { Delay { channel: &mut self.channel, count, _pin: PhantomPinned, } } } static mut WAKERS: [Option<Waker>; 4] = [None, None, None, None]; /// A future that yields once the PIT timer elapses pub struct Delay<'a> { channel: &'a mut register::ChannelInstance, _pin: PhantomPinned, count: u32, } impl<'a> Future for Delay<'a> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let count = self.count; // Safety: future is safely Unpin; only exposed as !Unpin, just in case. let this = unsafe { Pin::into_inner_unchecked(self) }; poll_delay(&mut this.channel, cx, count) } } fn poll_delay( channel: &mut register::ChannelInstance, cx: &mut Context<'_>, count: u32, ) -> Poll<()> { if ral::read_reg!(register, channel, TFLG, TIF == 1) { // Complete! W1C ral::write_reg!(register, channel, TFLG, TIF: 1); Poll::Ready(()) } else if ral::read_reg!(register, channel, TCTRL) != 0
else { // Neither complete nor active; prepare to run ral::write_reg!(register, channel, LDVAL, count); unsafe { WAKERS[channel.index()] = Some(cx.waker().clone()); } atomic::compiler_fence(atomic::Ordering::SeqCst); ral::modify_reg!(register, channel, TCTRL, TIE: 1); ral::modify_reg!(register, channel, TCTRL, TEN: 1); Poll::Pending } } impl<'a> Drop for Delay<'a> { fn drop(&mut self) { poll_cancel(&mut self.channel); } } fn poll_cancel(channel: &mut register::ChannelInstance) { ral::write_reg!(register, channel, TCTRL, 0); } interrupts! { handler!{unsafe fn PIT() { use register::ChannelInstance; [ ChannelInstance::zero(), ChannelInstance::one(), ChannelInstance::two(), ChannelInstance::three(), ] .iter_mut() .zip(WAKERS.iter_mut()) .filter(|(channel, _)| ral::read_reg!(register, channel, TFLG, TIF == 1)) .for_each(|(channel, waker)| { ral::write_reg!(register, channel, TCTRL, 0); if let Some(waker) = waker.take() { waker.wake(); } }); }} } /// The auto-generated RAL API is cumbersome. This is a macro-compatible API that makes it /// easier to work with. 
/// /// The approach here is to /// /// - take the RAL flags, and remove the channel number (copy-paste from RAL) /// - expose a 'Channel' as a collection of PIT channel registers (copy-paste from RAL) mod register { #![allow(unused, non_snake_case, non_upper_case_globals)] // Compatibility with RAL use crate::ral::{RORegister, RWRegister}; #[repr(C)] pub struct ChannelRegisterBlock { /// Timer Load Value Register pub LDVAL: RWRegister<u32>, /// Current Timer Value Register pub CVAL: RORegister<u32>, /// Timer Control Register pub TCTRL: RWRegister<u32>, /// Timer Flag Register pub TFLG: RWRegister<u32>, } pub struct ChannelInstance { addr: u32, idx: usize, _marker: ::core::marker::PhantomData<*const ChannelRegisterBlock>, } impl ::core::ops::Deref for ChannelInstance { type Target = ChannelRegisterBlock; #[inline(always)] fn deref(&self) -> &ChannelRegisterBlock { unsafe { &*(self.addr as *const _) } } } const PIT_BASE_ADDRESS: u32 = 0x4008_4000; const PIT_CHANNEL_0_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x100; const PIT_CHANNEL_1_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x110; const PIT_CHANNEL_2_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x120; const PIT_CHANNEL_3_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x130; impl ChannelInstance { const unsafe fn new(addr: u32, idx: usize) -> Self { ChannelInstance { addr, idx, _marker: core::marker::PhantomData, } } pub const fn index(&self) -> usize { self.idx } pub const unsafe fn zero() -> Self { Self::new(PIT_CHANNEL_0_ADDRESS, 0) } pub const unsafe fn one() -> Self { Self::new(PIT_CHANNEL_1_ADDRESS, 1) } pub const unsafe fn two() -> Self { Self::new(PIT_CHANNEL_2_ADDRESS, 2) } pub const unsafe fn three() -> Self { Self::new(PIT_CHANNEL_3_ADDRESS, 3) } } /// Timer Load Value Register pub mod LDVAL { /// Timer Start Value pub mod TSV { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// 
Read-write values (empty) pub mod RW {} } } /// Current Timer Value Register pub mod CVAL { /// Current Timer Value pub mod TVL { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// Timer Control Register pub mod TCTRL { /// Timer Enable pub mod TEN { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer n is disabled. pub const TEN_0: u32 = 0b0; /// 0b1: Timer n is enabled. pub const TEN_1: u32 = 0b1; } } /// Timer Interrupt Enable pub mod TIE { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Interrupt requests from Timer n are disabled. pub const TIE_0: u32 = 0b0; /// 0b1: Interrupt will be requested whenever TIF is set. pub const TIE_1: u32 = 0b1; } } /// Chain Mode pub mod CHN { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer is not chained. pub const CHN_0: u32 = 0b0; /// 0b1: Timer is chained to previous timer. For example, for Channel 2, if this field is set, Timer 2 is chained to Timer 1. 
pub const CHN_1: u32 = 0b1; } } } /// Timer Flag Register pub mod TFLG { /// Timer Interrupt Flag pub mod TIF { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timeout has not yet occurred. pub const TIF_0: u32 = 0b0; /// 0b1: Timeout has occurred. pub const TIF_1: u32 = 0b1; } } } }
{ // We're active; do nothing Poll::Pending }
conditional_block
pit.rs
//! Periodic interrupt timer (PIT) driver and futures //! //! The PIT timer channels are the most precise timers in the HAL. PIT timers run on the periodic clock //! frequency. //! //! A single hardware PIT instance has four PIT channels. Use [`new`](PIT::new()) to acquire these four //! channels. //! //! # Example //! //! Delay for 250ms using PIT channel 3. //! //! ```no_run //! use imxrt_async_hal as hal; //! use hal::ral; //! use hal::PIT; //! //! let ccm = ral::ccm::CCM::take().unwrap(); //! // Select 24MHz crystal oscillator, divide by 24 == 1MHz clock //! ral::modify_reg!(ral::ccm, ccm, CSCMR1, PERCLK_PODF: DIVIDE_24, PERCLK_CLK_SEL: 1); //! // Enable PIT clock gate //! ral::modify_reg!(ral::ccm, ccm, CCGR1, CG6: 0b11); //! let (_, _, _, mut pit) = ral::pit::PIT::take() //! .map(PIT::new) //! .unwrap(); //! //! # async { //! pit.delay(250_000).await; //! # }; //! ``` use crate::ral; use core::{ future::Future, marker::PhantomPinned, pin::Pin, sync::atomic, task::{Context, Poll, Waker}, }; /// Periodic interrupt timer (PIT) /// /// See the [module-level documentation](crate::pit) for more information. #[cfg_attr(docsrs, doc(cfg(feature = "pit")))] pub struct PIT { channel: register::ChannelInstance, } impl PIT { /// Acquire four PIT channels from the RAL's PIT instance pub fn new(pit: ral::pit::Instance) -> (PIT, PIT, PIT, PIT) { ral::write_reg!(ral::pit, pit, MCR, MDIS: MDIS_0); // Reset all PIT channels // // PIT channels may be used by a systems boot ROM, or another // user. Set them to a known, good state. 
ral::write_reg!(ral::pit, pit, TCTRL0, 0); ral::write_reg!(ral::pit, pit, TCTRL1, 0); ral::write_reg!(ral::pit, pit, TCTRL2, 0); ral::write_reg!(ral::pit, pit, TCTRL3, 0); unsafe { cortex_m::peripheral::NVIC::unmask(crate::ral::interrupt::PIT); ( PIT { channel: register::ChannelInstance::zero(), }, PIT { channel: register::ChannelInstance::one(), }, PIT { channel: register::ChannelInstance::two(), }, PIT { channel: register::ChannelInstance::three(), }, ) } } /// Wait for the counts to elapse /// /// The elapsed time is a function of your clock selection and clock frequency. pub fn delay(&mut self, count: u32) -> Delay<'_>
} static mut WAKERS: [Option<Waker>; 4] = [None, None, None, None]; /// A future that yields once the PIT timer elapses pub struct Delay<'a> { channel: &'a mut register::ChannelInstance, _pin: PhantomPinned, count: u32, } impl<'a> Future for Delay<'a> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let count = self.count; // Safety: future is safely Unpin; only exposed as !Unpin, just in case. let this = unsafe { Pin::into_inner_unchecked(self) }; poll_delay(&mut this.channel, cx, count) } } fn poll_delay( channel: &mut register::ChannelInstance, cx: &mut Context<'_>, count: u32, ) -> Poll<()> { if ral::read_reg!(register, channel, TFLG, TIF == 1) { // Complete! W1C ral::write_reg!(register, channel, TFLG, TIF: 1); Poll::Ready(()) } else if ral::read_reg!(register, channel, TCTRL) != 0 { // We're active; do nothing Poll::Pending } else { // Neither complete nor active; prepare to run ral::write_reg!(register, channel, LDVAL, count); unsafe { WAKERS[channel.index()] = Some(cx.waker().clone()); } atomic::compiler_fence(atomic::Ordering::SeqCst); ral::modify_reg!(register, channel, TCTRL, TIE: 1); ral::modify_reg!(register, channel, TCTRL, TEN: 1); Poll::Pending } } impl<'a> Drop for Delay<'a> { fn drop(&mut self) { poll_cancel(&mut self.channel); } } fn poll_cancel(channel: &mut register::ChannelInstance) { ral::write_reg!(register, channel, TCTRL, 0); } interrupts! { handler!{unsafe fn PIT() { use register::ChannelInstance; [ ChannelInstance::zero(), ChannelInstance::one(), ChannelInstance::two(), ChannelInstance::three(), ] .iter_mut() .zip(WAKERS.iter_mut()) .filter(|(channel, _)| ral::read_reg!(register, channel, TFLG, TIF == 1)) .for_each(|(channel, waker)| { ral::write_reg!(register, channel, TCTRL, 0); if let Some(waker) = waker.take() { waker.wake(); } }); }} } /// The auto-generated RAL API is cumbersome. This is a macro-compatible API that makes it /// easier to work with. 
/// /// The approach here is to /// /// - take the RAL flags, and remove the channel number (copy-paste from RAL) /// - expose a 'Channel' as a collection of PIT channel registers (copy-paste from RAL) mod register { #![allow(unused, non_snake_case, non_upper_case_globals)] // Compatibility with RAL use crate::ral::{RORegister, RWRegister}; #[repr(C)] pub struct ChannelRegisterBlock { /// Timer Load Value Register pub LDVAL: RWRegister<u32>, /// Current Timer Value Register pub CVAL: RORegister<u32>, /// Timer Control Register pub TCTRL: RWRegister<u32>, /// Timer Flag Register pub TFLG: RWRegister<u32>, } pub struct ChannelInstance { addr: u32, idx: usize, _marker: ::core::marker::PhantomData<*const ChannelRegisterBlock>, } impl ::core::ops::Deref for ChannelInstance { type Target = ChannelRegisterBlock; #[inline(always)] fn deref(&self) -> &ChannelRegisterBlock { unsafe { &*(self.addr as *const _) } } } const PIT_BASE_ADDRESS: u32 = 0x4008_4000; const PIT_CHANNEL_0_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x100; const PIT_CHANNEL_1_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x110; const PIT_CHANNEL_2_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x120; const PIT_CHANNEL_3_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x130; impl ChannelInstance { const unsafe fn new(addr: u32, idx: usize) -> Self { ChannelInstance { addr, idx, _marker: core::marker::PhantomData, } } pub const fn index(&self) -> usize { self.idx } pub const unsafe fn zero() -> Self { Self::new(PIT_CHANNEL_0_ADDRESS, 0) } pub const unsafe fn one() -> Self { Self::new(PIT_CHANNEL_1_ADDRESS, 1) } pub const unsafe fn two() -> Self { Self::new(PIT_CHANNEL_2_ADDRESS, 2) } pub const unsafe fn three() -> Self { Self::new(PIT_CHANNEL_3_ADDRESS, 3) } } /// Timer Load Value Register pub mod LDVAL { /// Timer Start Value pub mod TSV { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// 
Read-write values (empty) pub mod RW {} } } /// Current Timer Value Register pub mod CVAL { /// Current Timer Value pub mod TVL { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// Timer Control Register pub mod TCTRL { /// Timer Enable pub mod TEN { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer n is disabled. pub const TEN_0: u32 = 0b0; /// 0b1: Timer n is enabled. pub const TEN_1: u32 = 0b1; } } /// Timer Interrupt Enable pub mod TIE { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Interrupt requests from Timer n are disabled. pub const TIE_0: u32 = 0b0; /// 0b1: Interrupt will be requested whenever TIF is set. pub const TIE_1: u32 = 0b1; } } /// Chain Mode pub mod CHN { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timer is not chained. pub const CHN_0: u32 = 0b0; /// 0b1: Timer is chained to previous timer. For example, for Channel 2, if this field is set, Timer 2 is chained to Timer 1. 
pub const CHN_1: u32 = 0b1; } } } /// Timer Flag Register pub mod TFLG { /// Timer Interrupt Flag pub mod TIF { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Timeout has not yet occurred. pub const TIF_0: u32 = 0b0; /// 0b1: Timeout has occurred. pub const TIF_1: u32 = 0b1; } } } }
{ Delay { channel: &mut self.channel, count, _pin: PhantomPinned, } }
identifier_body
transform_provider.go
package terraform import ( "errors" "fmt" "log" "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { return GraphTransformMulti( // Add providers from the config &ProviderConfigTransformer{ Module: mod, Providers: providers, Concrete: concrete, }, // Add any remaining missing providers &MissingProviderTransformer{ Providers: providers, Concrete: concrete, }, // Connect the providers &ProviderTransformer{}, // Remove unused providers and proxies &PruneProviderTransformer{}, // Connect provider to their parent provider nodes &ParentProviderTransformer{}, ) } // GraphNodeProvider is an interface that nodes that can be a provider // must implement. // ProviderName returns the name of the provider this satisfies. // Name returns the full name of the provider in the config. type GraphNodeProvider interface { ProviderName() string Name() string } // GraphNodeCloseProvider is an interface that nodes that can be a close // provider must implement. The CloseProviderName returned is the name of // the provider they satisfy. type GraphNodeCloseProvider interface { CloseProviderName() string } // GraphNodeProviderConsumer is an interface that nodes that require // a provider must implement. ProvidedBy must return the name of the provider // to use. This may be a provider by type, type.alias or a fully resolved // provider name type GraphNodeProviderConsumer interface { ProvidedBy() string // Set the resolved provider address for this resource. SetProvider(string) } // ProviderTransformer is a GraphTransformer that maps resources to // providers within the graph. This will error if there are any resources // that don't map to proper resources. 
type ProviderTransformer struct{} func (t *ProviderTransformer) Transform(g *Graph) error { // Go through the other nodes and match them to providers they need var err error m := providerVertexMap(g) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProviderConsumer); ok { p := pv.ProvidedBy() key := providerMapKey(p, pv) target := m[key] sp, ok := pv.(GraphNodeSubPath) if !ok && target == nil { // no target, and no path to walk up err = multierror.Append(err, fmt.Errorf( "%s: provider %s couldn't be found", dag.VertexName(v), p)) break } // if we don't have a provider at this level, walk up the path looking for one for i := 1; target == nil; i++ { path := normalizeModulePath(sp.Path()) if len(path) < i { break } key = ResolveProviderName(p, path[:len(path)-i]) target = m[key] if target != nil { break } } if target == nil { err = multierror.Append(err, fmt.Errorf( "%s: configuration for %s is not present; a provider configuration block is required for all operations", dag.VertexName(v), p, )) break } // see if this in an inherited provider if p, ok := target.(*graphNodeProxyProvider); ok { g.Remove(p) target = p.Target() key = target.(GraphNodeProvider).Name() } log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) pv.SetProvider(key) g.Connect(dag.BasicEdge(v, target)) } } return err } // CloseProviderTransformer is a GraphTransformer that adds nodes to the // graph that will close open provider connections that aren't needed anymore. // A provider connection is not needed anymore once all depended resources // in the graph are evaluated. 
type CloseProviderTransformer struct{} func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) cpm := make(map[string]*graphNodeCloseProvider) var err error for _, v := range pm { p := v.(GraphNodeProvider) // get the close provider of this type if we alread created it closer := cpm[p.ProviderName()] if closer == nil { // create a closer for this provider type closer = &graphNodeCloseProvider{ProviderNameValue: p.ProviderName()} g.Add(closer) cpm[p.ProviderName()] = closer } // Close node depends on the provider itself // this is added unconditionally, so it will connect to all instances // of the provider. Extra edges will be removed by transitive // reduction. g.Connect(dag.BasicEdge(closer, p)) // connect all the provider's resources to the close node for _, s := range g.UpEdges(p).List() { if _, ok := s.(GraphNodeProviderConsumer); ok { g.Connect(dag.BasicEdge(closer, s)) } } } return err } // MissingProviderTransformer is a GraphTransformer that adds nodes for all // required providers into the graph. Specifically, it creates provider // configuration nodes for all the providers that we support. These are pruned // later during an optimization pass. type MissingProviderTransformer struct { // Providers is the list of providers we support. Providers []string // Concrete, if set, overrides how the providers are made. Concrete ConcreteProviderNodeFunc } func (t *MissingProviderTransformer) Transform(g *Graph) error { // Initialize factory if t.Concrete == nil { t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { return a } } var err error m := providerVertexMap(g) for _, v := range g.Vertices() { pv, ok := v.(GraphNodeProviderConsumer) if !ok { continue } p := pv.ProvidedBy() // this may be the resolved provider from the state, so we need to get // the base provider name. 
parts := strings.SplitAfter(p, "provider.") p = parts[len(parts)-1] key := ResolveProviderName(p, nil) provider := m[key] // we already have it if provider != nil { continue } // we don't implicitly create aliased providers if strings.Contains(p, ".") { log.Println("[DEBUG] not adding missing provider alias:", p) continue } log.Println("[DEBUG] adding missing provider:", p) // create the misisng top-level provider provider = t.Concrete(&NodeAbstractProvider{ NameValue: p, }).(dag.Vertex) m[key] = g.Add(provider) } return err } // ParentProviderTransformer connects provider nodes to their parents. // // This works by finding nodes that are both GraphNodeProviders and // GraphNodeSubPath. It then connects the providers to their parent // path. The parent provider is always at the root level. type ParentProviderTransformer struct{} func (t *ParentProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) for _, v := range g.Vertices() { // Only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // Also require a subpath, if there is no subpath then we // can't have a parent. if pn, ok := v.(GraphNodeSubPath); ok { if len(normalizeModulePath(pn.Path())) <= 1 { continue } } // this provider may be disabled, but we can only get it's name from // the ProviderName string name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) parent := pm[name] if parent != nil { g.Connect(dag.BasicEdge(v, parent)) } } return nil } // PruneProviderTransformer removes any providers that are not actually used by // anything, and provider proxies. This avoids the provider being initialized // and configured. This both saves resources but also avoids errors since // configuration may imply initialization which may require auth. 
type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { // We only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // ProxyProviders will have up edges, but we're now done with them in the graph if _, ok := v.(*graphNodeProxyProvider); ok { log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) g.Remove(v) } // Remove providers with no dependencies. if g.UpEdges(v).Len() == 0 { log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) g.Remove(v) } } return nil } // providerMapKey is a helper that gives us the key to use for the // maps returned by things such as providerVertexMap. func providerMapKey(k string, v dag.Vertex) string { if strings.Contains(k, "provider.") { // this is already resolved return k } // we create a dummy provider to var path []string if sp, ok := v.(GraphNodeSubPath); ok { path = normalizeModulePath(sp.Path()) } return ResolveProviderName(k, path) } func providerVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { // TODO: The Name may have meta info, like " (disabled)" name := strings.SplitN(pv.Name(), " ", 2)[0] m[name] = v } } return m } func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeCloseProvider); ok { m[pv.CloseProviderName()] = v } } return m } type graphNodeCloseProvider struct { ProviderNameValue string } func (n *graphNodeCloseProvider) Name() string { return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) } // GraphNodeEvalable impl. func (n *graphNodeCloseProvider) EvalTree() EvalNode { return CloseProviderEvalTree(n.ProviderNameValue) } // GraphNodeDependable impl. 
func (n *graphNodeCloseProvider) DependableName() []string { return []string{n.Name()} } func (n *graphNodeCloseProvider) CloseProviderName() string { return n.ProviderNameValue } // GraphNodeDotter impl. func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { if !opts.Verbose { return nil } return &dag.DotNode{ Name: name, Attrs: map[string]string{ "label": n.Name(), "shape": "diamond", }, } } // RemovableIfNotTargeted func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { // We need to add this so that this node will be removed if // it isn't targeted or a dependency of a target. return true } // graphNodeProxyProvider is a GraphNodeProvider implementation that is used to // store the name and value of a provider node for inheritance between modules. // These nodes are only used to store the data while loading the provider // configurations, and are removed after all the resources have been connected // to their providers. type graphNodeProxyProvider struct { nameValue string path []string target GraphNodeProvider } func (n *graphNodeProxyProvider) ProviderName() string { return n.Target().ProviderName() } func (n *graphNodeProxyProvider) Name() string { return ResolveProviderName(n.nameValue, n.path) } // find the concrete provider instance func (n *graphNodeProxyProvider) Target() GraphNodeProvider { switch t := n.target.(type) { case *graphNodeProxyProvider: return t.Target() default: return n.target } } // ProviderConfigTransformer adds all provider nodes from the configuration and // attaches the configs. type ProviderConfigTransformer struct { Providers []string Concrete ConcreteProviderNodeFunc // each provider node is stored here so that the proxy nodes can look up // their targets by name. providers map[string]GraphNodeProvider // record providers that can be overriden with a proxy proxiable map[string]bool // Module is the module to add resources from. 
Module *module.Tree } func (t *ProviderConfigTransformer) Transform(g *Graph) error { // If no module is given, we don't do anything if t.Module == nil { return nil } // If the module isn't loaded, that is simply an error if !t.Module.Loaded() { return errors.New("module must be loaded for ProviderConfigTransformer") } t.providers = make(map[string]GraphNodeProvider) t.proxiable = make(map[string]bool) // Start the transformation process if err := t.transform(g, t.Module); err != nil { return err } // finally attach the configs to the new nodes return t.attachProviderConfigs(g) } func (t *ProviderConfigTransformer)
(g *Graph, m *module.Tree) error { // If no config, do nothing if m == nil { return nil } // Add our resources if err := t.transformSingle(g, m); err != nil { return err } // Transform all the children. for _, c := range m.Children() { if err := t.transform(g, c); err != nil { return err } } return nil } func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) // Get the configuration for this module conf := m.Config() // Build the path we're at path := m.Path() if len(path) > 0 { path = append([]string{RootModuleName}, path...) } // add all providers from the configuration for _, p := range conf.ProviderConfigs { name := p.Name if p.Alias != "" { name += "." + p.Alias } v := t.Concrete(&NodeAbstractProvider{ NameValue: name, PathValue: path, }) // Add it to the graph g.Add(v) fullName := ResolveProviderName(name, path) t.providers[fullName] = v.(GraphNodeProvider) t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 } // Now replace the provider nodes with proxy nodes if a provider was being // passed in, and create implicit proxies if there was no config. Any extra // proxies will be removed in the prune step. return t.addProxyProviders(g, m) } func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { path := m.Path() // can't add proxies at the root if len(path) == 0 { return nil } parentPath := path[:len(path)-1] parent := t.Module.Child(parentPath) if parent == nil { return nil } var parentCfg *config.Module for _, mod := range parent.Config().Modules { if mod.Name == m.Name() { parentCfg = mod break } } if parentCfg == nil { // this can't really happen during normal execution. return fmt.Errorf("parent module config not found for %s", m.Name()) } // Go through all the providers the parent is passing in, and add proxies to // the parent provider nodes. 
for name, parentName := range parentCfg.Providers { fullName := ResolveProviderName(name, path) fullParentName := ResolveProviderName(parentName, parentPath) parentProvider := t.providers[fullParentName] if parentProvider == nil { return fmt.Errorf("missing provider %s", fullParentName) } proxy := &graphNodeProxyProvider{ nameValue: name, path: path, target: parentProvider, } concreteProvider := t.providers[fullName] // replace the concrete node with the provider passed in if concreteProvider != nil && t.proxiable[fullName] { g.Replace(concreteProvider, proxy) t.providers[fullName] = proxy continue } // aliased providers can't be implicitly passed in if strings.Contains(name, ".") { continue } // There was no concrete provider, so add this as an implicit provider. // The extra proxy will be pruned later if it's unused. g.Add(proxy) t.providers[fullName] = proxy } return nil } func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { for _, v := range g.Vertices() { // Only care about GraphNodeAttachProvider implementations apn, ok := v.(GraphNodeAttachProvider) if !ok { continue } // Determine what we're looking for path := normalizeModulePath(apn.Path())[1:] name := apn.ProviderName() log.Printf("[TRACE] Attach provider request: %#v %s", path, name) // Get the configuration. tree := t.Module.Child(path) if tree == nil { continue } // Go through the provider configs to find the matching config for _, p := range tree.Config().ProviderConfigs { // Build the name, which is "name.alias" if an alias exists current := p.Name if p.Alias != "" { current += "." + p.Alias } // If the configs match then attach! if current == name { log.Printf("[TRACE] Attaching provider config: %#v", p) apn.AttachProvider(p) break } } } return nil }
transform
identifier_name
transform_provider.go
package terraform import ( "errors" "fmt" "log" "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { return GraphTransformMulti( // Add providers from the config &ProviderConfigTransformer{ Module: mod, Providers: providers, Concrete: concrete, }, // Add any remaining missing providers &MissingProviderTransformer{ Providers: providers, Concrete: concrete, }, // Connect the providers &ProviderTransformer{}, // Remove unused providers and proxies &PruneProviderTransformer{}, // Connect provider to their parent provider nodes &ParentProviderTransformer{}, ) } // GraphNodeProvider is an interface that nodes that can be a provider // must implement. // ProviderName returns the name of the provider this satisfies. // Name returns the full name of the provider in the config. type GraphNodeProvider interface { ProviderName() string Name() string } // GraphNodeCloseProvider is an interface that nodes that can be a close // provider must implement. The CloseProviderName returned is the name of // the provider they satisfy. type GraphNodeCloseProvider interface { CloseProviderName() string } // GraphNodeProviderConsumer is an interface that nodes that require // a provider must implement. ProvidedBy must return the name of the provider // to use. This may be a provider by type, type.alias or a fully resolved // provider name type GraphNodeProviderConsumer interface { ProvidedBy() string // Set the resolved provider address for this resource. SetProvider(string) } // ProviderTransformer is a GraphTransformer that maps resources to // providers within the graph. This will error if there are any resources // that don't map to proper resources. 
type ProviderTransformer struct{} func (t *ProviderTransformer) Transform(g *Graph) error { // Go through the other nodes and match them to providers they need var err error m := providerVertexMap(g) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProviderConsumer); ok { p := pv.ProvidedBy() key := providerMapKey(p, pv) target := m[key] sp, ok := pv.(GraphNodeSubPath) if !ok && target == nil { // no target, and no path to walk up err = multierror.Append(err, fmt.Errorf( "%s: provider %s couldn't be found", dag.VertexName(v), p)) break } // if we don't have a provider at this level, walk up the path looking for one for i := 1; target == nil; i++ { path := normalizeModulePath(sp.Path()) if len(path) < i { break } key = ResolveProviderName(p, path[:len(path)-i]) target = m[key] if target != nil { break } } if target == nil { err = multierror.Append(err, fmt.Errorf( "%s: configuration for %s is not present; a provider configuration block is required for all operations", dag.VertexName(v), p, )) break } // see if this in an inherited provider if p, ok := target.(*graphNodeProxyProvider); ok { g.Remove(p) target = p.Target() key = target.(GraphNodeProvider).Name() } log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) pv.SetProvider(key) g.Connect(dag.BasicEdge(v, target)) } } return err } // CloseProviderTransformer is a GraphTransformer that adds nodes to the // graph that will close open provider connections that aren't needed anymore. // A provider connection is not needed anymore once all depended resources // in the graph are evaluated. 
type CloseProviderTransformer struct{} func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) cpm := make(map[string]*graphNodeCloseProvider) var err error for _, v := range pm { p := v.(GraphNodeProvider) // get the close provider of this type if we alread created it closer := cpm[p.ProviderName()] if closer == nil { // create a closer for this provider type closer = &graphNodeCloseProvider{ProviderNameValue: p.ProviderName()} g.Add(closer) cpm[p.ProviderName()] = closer } // Close node depends on the provider itself // this is added unconditionally, so it will connect to all instances // of the provider. Extra edges will be removed by transitive // reduction. g.Connect(dag.BasicEdge(closer, p)) // connect all the provider's resources to the close node for _, s := range g.UpEdges(p).List() { if _, ok := s.(GraphNodeProviderConsumer); ok { g.Connect(dag.BasicEdge(closer, s)) } } } return err } // MissingProviderTransformer is a GraphTransformer that adds nodes for all // required providers into the graph. Specifically, it creates provider // configuration nodes for all the providers that we support. These are pruned // later during an optimization pass. type MissingProviderTransformer struct { // Providers is the list of providers we support. Providers []string // Concrete, if set, overrides how the providers are made. Concrete ConcreteProviderNodeFunc } func (t *MissingProviderTransformer) Transform(g *Graph) error { // Initialize factory if t.Concrete == nil { t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { return a } } var err error m := providerVertexMap(g) for _, v := range g.Vertices() { pv, ok := v.(GraphNodeProviderConsumer) if !ok { continue } p := pv.ProvidedBy() // this may be the resolved provider from the state, so we need to get // the base provider name. 
parts := strings.SplitAfter(p, "provider.") p = parts[len(parts)-1] key := ResolveProviderName(p, nil) provider := m[key] // we already have it if provider != nil { continue } // we don't implicitly create aliased providers if strings.Contains(p, ".") { log.Println("[DEBUG] not adding missing provider alias:", p) continue } log.Println("[DEBUG] adding missing provider:", p) // create the misisng top-level provider provider = t.Concrete(&NodeAbstractProvider{ NameValue: p, }).(dag.Vertex) m[key] = g.Add(provider) } return err } // ParentProviderTransformer connects provider nodes to their parents. // // This works by finding nodes that are both GraphNodeProviders and // GraphNodeSubPath. It then connects the providers to their parent // path. The parent provider is always at the root level. type ParentProviderTransformer struct{} func (t *ParentProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) for _, v := range g.Vertices() { // Only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // Also require a subpath, if there is no subpath then we // can't have a parent. if pn, ok := v.(GraphNodeSubPath); ok { if len(normalizeModulePath(pn.Path())) <= 1 { continue } } // this provider may be disabled, but we can only get it's name from // the ProviderName string name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) parent := pm[name] if parent != nil { g.Connect(dag.BasicEdge(v, parent)) } } return nil } // PruneProviderTransformer removes any providers that are not actually used by // anything, and provider proxies. This avoids the provider being initialized // and configured. This both saves resources but also avoids errors since // configuration may imply initialization which may require auth. 
type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { // We only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // ProxyProviders will have up edges, but we're now done with them in the graph if _, ok := v.(*graphNodeProxyProvider); ok { log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) g.Remove(v) } // Remove providers with no dependencies. if g.UpEdges(v).Len() == 0 { log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) g.Remove(v) } } return nil } // providerMapKey is a helper that gives us the key to use for the // maps returned by things such as providerVertexMap. func providerMapKey(k string, v dag.Vertex) string { if strings.Contains(k, "provider.") { // this is already resolved return k } // we create a dummy provider to var path []string if sp, ok := v.(GraphNodeSubPath); ok { path = normalizeModulePath(sp.Path()) } return ResolveProviderName(k, path) } func providerVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { // TODO: The Name may have meta info, like " (disabled)" name := strings.SplitN(pv.Name(), " ", 2)[0] m[name] = v } } return m } func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeCloseProvider); ok { m[pv.CloseProviderName()] = v } } return m } type graphNodeCloseProvider struct { ProviderNameValue string } func (n *graphNodeCloseProvider) Name() string { return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) } // GraphNodeEvalable impl. func (n *graphNodeCloseProvider) EvalTree() EvalNode { return CloseProviderEvalTree(n.ProviderNameValue) } // GraphNodeDependable impl. func (n *graphNodeCloseProvider) DependableName() []string
func (n *graphNodeCloseProvider) CloseProviderName() string { return n.ProviderNameValue } // GraphNodeDotter impl. func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { if !opts.Verbose { return nil } return &dag.DotNode{ Name: name, Attrs: map[string]string{ "label": n.Name(), "shape": "diamond", }, } } // RemovableIfNotTargeted func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { // We need to add this so that this node will be removed if // it isn't targeted or a dependency of a target. return true } // graphNodeProxyProvider is a GraphNodeProvider implementation that is used to // store the name and value of a provider node for inheritance between modules. // These nodes are only used to store the data while loading the provider // configurations, and are removed after all the resources have been connected // to their providers. type graphNodeProxyProvider struct { nameValue string path []string target GraphNodeProvider } func (n *graphNodeProxyProvider) ProviderName() string { return n.Target().ProviderName() } func (n *graphNodeProxyProvider) Name() string { return ResolveProviderName(n.nameValue, n.path) } // find the concrete provider instance func (n *graphNodeProxyProvider) Target() GraphNodeProvider { switch t := n.target.(type) { case *graphNodeProxyProvider: return t.Target() default: return n.target } } // ProviderConfigTransformer adds all provider nodes from the configuration and // attaches the configs. type ProviderConfigTransformer struct { Providers []string Concrete ConcreteProviderNodeFunc // each provider node is stored here so that the proxy nodes can look up // their targets by name. providers map[string]GraphNodeProvider // record providers that can be overriden with a proxy proxiable map[string]bool // Module is the module to add resources from. 
Module *module.Tree } func (t *ProviderConfigTransformer) Transform(g *Graph) error { // If no module is given, we don't do anything if t.Module == nil { return nil } // If the module isn't loaded, that is simply an error if !t.Module.Loaded() { return errors.New("module must be loaded for ProviderConfigTransformer") } t.providers = make(map[string]GraphNodeProvider) t.proxiable = make(map[string]bool) // Start the transformation process if err := t.transform(g, t.Module); err != nil { return err } // finally attach the configs to the new nodes return t.attachProviderConfigs(g) } func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { // If no config, do nothing if m == nil { return nil } // Add our resources if err := t.transformSingle(g, m); err != nil { return err } // Transform all the children. for _, c := range m.Children() { if err := t.transform(g, c); err != nil { return err } } return nil } func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) // Get the configuration for this module conf := m.Config() // Build the path we're at path := m.Path() if len(path) > 0 { path = append([]string{RootModuleName}, path...) } // add all providers from the configuration for _, p := range conf.ProviderConfigs { name := p.Name if p.Alias != "" { name += "." + p.Alias } v := t.Concrete(&NodeAbstractProvider{ NameValue: name, PathValue: path, }) // Add it to the graph g.Add(v) fullName := ResolveProviderName(name, path) t.providers[fullName] = v.(GraphNodeProvider) t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 } // Now replace the provider nodes with proxy nodes if a provider was being // passed in, and create implicit proxies if there was no config. Any extra // proxies will be removed in the prune step. 
return t.addProxyProviders(g, m) } func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { path := m.Path() // can't add proxies at the root if len(path) == 0 { return nil } parentPath := path[:len(path)-1] parent := t.Module.Child(parentPath) if parent == nil { return nil } var parentCfg *config.Module for _, mod := range parent.Config().Modules { if mod.Name == m.Name() { parentCfg = mod break } } if parentCfg == nil { // this can't really happen during normal execution. return fmt.Errorf("parent module config not found for %s", m.Name()) } // Go through all the providers the parent is passing in, and add proxies to // the parent provider nodes. for name, parentName := range parentCfg.Providers { fullName := ResolveProviderName(name, path) fullParentName := ResolveProviderName(parentName, parentPath) parentProvider := t.providers[fullParentName] if parentProvider == nil { return fmt.Errorf("missing provider %s", fullParentName) } proxy := &graphNodeProxyProvider{ nameValue: name, path: path, target: parentProvider, } concreteProvider := t.providers[fullName] // replace the concrete node with the provider passed in if concreteProvider != nil && t.proxiable[fullName] { g.Replace(concreteProvider, proxy) t.providers[fullName] = proxy continue } // aliased providers can't be implicitly passed in if strings.Contains(name, ".") { continue } // There was no concrete provider, so add this as an implicit provider. // The extra proxy will be pruned later if it's unused. 
g.Add(proxy) t.providers[fullName] = proxy } return nil } func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { for _, v := range g.Vertices() { // Only care about GraphNodeAttachProvider implementations apn, ok := v.(GraphNodeAttachProvider) if !ok { continue } // Determine what we're looking for path := normalizeModulePath(apn.Path())[1:] name := apn.ProviderName() log.Printf("[TRACE] Attach provider request: %#v %s", path, name) // Get the configuration. tree := t.Module.Child(path) if tree == nil { continue } // Go through the provider configs to find the matching config for _, p := range tree.Config().ProviderConfigs { // Build the name, which is "name.alias" if an alias exists current := p.Name if p.Alias != "" { current += "." + p.Alias } // If the configs match then attach! if current == name { log.Printf("[TRACE] Attaching provider config: %#v", p) apn.AttachProvider(p) break } } } return nil }
{ return []string{n.Name()} }
identifier_body
transform_provider.go
package terraform import ( "errors" "fmt" "log" "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { return GraphTransformMulti( // Add providers from the config &ProviderConfigTransformer{ Module: mod, Providers: providers, Concrete: concrete, }, // Add any remaining missing providers &MissingProviderTransformer{ Providers: providers, Concrete: concrete, }, // Connect the providers &ProviderTransformer{}, // Remove unused providers and proxies &PruneProviderTransformer{}, // Connect provider to their parent provider nodes &ParentProviderTransformer{}, ) } // GraphNodeProvider is an interface that nodes that can be a provider // must implement. // ProviderName returns the name of the provider this satisfies. // Name returns the full name of the provider in the config. type GraphNodeProvider interface { ProviderName() string Name() string } // GraphNodeCloseProvider is an interface that nodes that can be a close // provider must implement. The CloseProviderName returned is the name of // the provider they satisfy. type GraphNodeCloseProvider interface { CloseProviderName() string } // GraphNodeProviderConsumer is an interface that nodes that require // a provider must implement. ProvidedBy must return the name of the provider // to use. This may be a provider by type, type.alias or a fully resolved // provider name type GraphNodeProviderConsumer interface { ProvidedBy() string // Set the resolved provider address for this resource. SetProvider(string) } // ProviderTransformer is a GraphTransformer that maps resources to // providers within the graph. This will error if there are any resources // that don't map to proper resources. 
type ProviderTransformer struct{} func (t *ProviderTransformer) Transform(g *Graph) error { // Go through the other nodes and match them to providers they need var err error m := providerVertexMap(g) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProviderConsumer); ok { p := pv.ProvidedBy() key := providerMapKey(p, pv) target := m[key] sp, ok := pv.(GraphNodeSubPath) if !ok && target == nil { // no target, and no path to walk up err = multierror.Append(err, fmt.Errorf( "%s: provider %s couldn't be found", dag.VertexName(v), p)) break } // if we don't have a provider at this level, walk up the path looking for one for i := 1; target == nil; i++ { path := normalizeModulePath(sp.Path()) if len(path) < i { break } key = ResolveProviderName(p, path[:len(path)-i]) target = m[key] if target != nil { break } } if target == nil { err = multierror.Append(err, fmt.Errorf( "%s: configuration for %s is not present; a provider configuration block is required for all operations", dag.VertexName(v), p, )) break } // see if this in an inherited provider if p, ok := target.(*graphNodeProxyProvider); ok
log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) pv.SetProvider(key) g.Connect(dag.BasicEdge(v, target)) } } return err } // CloseProviderTransformer is a GraphTransformer that adds nodes to the // graph that will close open provider connections that aren't needed anymore. // A provider connection is not needed anymore once all depended resources // in the graph are evaluated. type CloseProviderTransformer struct{} func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) cpm := make(map[string]*graphNodeCloseProvider) var err error for _, v := range pm { p := v.(GraphNodeProvider) // get the close provider of this type if we alread created it closer := cpm[p.ProviderName()] if closer == nil { // create a closer for this provider type closer = &graphNodeCloseProvider{ProviderNameValue: p.ProviderName()} g.Add(closer) cpm[p.ProviderName()] = closer } // Close node depends on the provider itself // this is added unconditionally, so it will connect to all instances // of the provider. Extra edges will be removed by transitive // reduction. g.Connect(dag.BasicEdge(closer, p)) // connect all the provider's resources to the close node for _, s := range g.UpEdges(p).List() { if _, ok := s.(GraphNodeProviderConsumer); ok { g.Connect(dag.BasicEdge(closer, s)) } } } return err } // MissingProviderTransformer is a GraphTransformer that adds nodes for all // required providers into the graph. Specifically, it creates provider // configuration nodes for all the providers that we support. These are pruned // later during an optimization pass. type MissingProviderTransformer struct { // Providers is the list of providers we support. Providers []string // Concrete, if set, overrides how the providers are made. 
Concrete ConcreteProviderNodeFunc } func (t *MissingProviderTransformer) Transform(g *Graph) error { // Initialize factory if t.Concrete == nil { t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { return a } } var err error m := providerVertexMap(g) for _, v := range g.Vertices() { pv, ok := v.(GraphNodeProviderConsumer) if !ok { continue } p := pv.ProvidedBy() // this may be the resolved provider from the state, so we need to get // the base provider name. parts := strings.SplitAfter(p, "provider.") p = parts[len(parts)-1] key := ResolveProviderName(p, nil) provider := m[key] // we already have it if provider != nil { continue } // we don't implicitly create aliased providers if strings.Contains(p, ".") { log.Println("[DEBUG] not adding missing provider alias:", p) continue } log.Println("[DEBUG] adding missing provider:", p) // create the misisng top-level provider provider = t.Concrete(&NodeAbstractProvider{ NameValue: p, }).(dag.Vertex) m[key] = g.Add(provider) } return err } // ParentProviderTransformer connects provider nodes to their parents. // // This works by finding nodes that are both GraphNodeProviders and // GraphNodeSubPath. It then connects the providers to their parent // path. The parent provider is always at the root level. type ParentProviderTransformer struct{} func (t *ParentProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) for _, v := range g.Vertices() { // Only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // Also require a subpath, if there is no subpath then we // can't have a parent. 
if pn, ok := v.(GraphNodeSubPath); ok { if len(normalizeModulePath(pn.Path())) <= 1 { continue } } // this provider may be disabled, but we can only get it's name from // the ProviderName string name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) parent := pm[name] if parent != nil { g.Connect(dag.BasicEdge(v, parent)) } } return nil } // PruneProviderTransformer removes any providers that are not actually used by // anything, and provider proxies. This avoids the provider being initialized // and configured. This both saves resources but also avoids errors since // configuration may imply initialization which may require auth. type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { // We only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // ProxyProviders will have up edges, but we're now done with them in the graph if _, ok := v.(*graphNodeProxyProvider); ok { log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) g.Remove(v) } // Remove providers with no dependencies. if g.UpEdges(v).Len() == 0 { log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) g.Remove(v) } } return nil } // providerMapKey is a helper that gives us the key to use for the // maps returned by things such as providerVertexMap. 
func providerMapKey(k string, v dag.Vertex) string { if strings.Contains(k, "provider.") { // this is already resolved return k } // we create a dummy provider to var path []string if sp, ok := v.(GraphNodeSubPath); ok { path = normalizeModulePath(sp.Path()) } return ResolveProviderName(k, path) } func providerVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { // TODO: The Name may have meta info, like " (disabled)" name := strings.SplitN(pv.Name(), " ", 2)[0] m[name] = v } } return m } func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeCloseProvider); ok { m[pv.CloseProviderName()] = v } } return m } type graphNodeCloseProvider struct { ProviderNameValue string } func (n *graphNodeCloseProvider) Name() string { return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) } // GraphNodeEvalable impl. func (n *graphNodeCloseProvider) EvalTree() EvalNode { return CloseProviderEvalTree(n.ProviderNameValue) } // GraphNodeDependable impl. func (n *graphNodeCloseProvider) DependableName() []string { return []string{n.Name()} } func (n *graphNodeCloseProvider) CloseProviderName() string { return n.ProviderNameValue } // GraphNodeDotter impl. func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { if !opts.Verbose { return nil } return &dag.DotNode{ Name: name, Attrs: map[string]string{ "label": n.Name(), "shape": "diamond", }, } } // RemovableIfNotTargeted func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { // We need to add this so that this node will be removed if // it isn't targeted or a dependency of a target. return true } // graphNodeProxyProvider is a GraphNodeProvider implementation that is used to // store the name and value of a provider node for inheritance between modules. 
// These nodes are only used to store the data while loading the provider // configurations, and are removed after all the resources have been connected // to their providers. type graphNodeProxyProvider struct { nameValue string path []string target GraphNodeProvider } func (n *graphNodeProxyProvider) ProviderName() string { return n.Target().ProviderName() } func (n *graphNodeProxyProvider) Name() string { return ResolveProviderName(n.nameValue, n.path) } // find the concrete provider instance func (n *graphNodeProxyProvider) Target() GraphNodeProvider { switch t := n.target.(type) { case *graphNodeProxyProvider: return t.Target() default: return n.target } } // ProviderConfigTransformer adds all provider nodes from the configuration and // attaches the configs. type ProviderConfigTransformer struct { Providers []string Concrete ConcreteProviderNodeFunc // each provider node is stored here so that the proxy nodes can look up // their targets by name. providers map[string]GraphNodeProvider // record providers that can be overriden with a proxy proxiable map[string]bool // Module is the module to add resources from. Module *module.Tree } func (t *ProviderConfigTransformer) Transform(g *Graph) error { // If no module is given, we don't do anything if t.Module == nil { return nil } // If the module isn't loaded, that is simply an error if !t.Module.Loaded() { return errors.New("module must be loaded for ProviderConfigTransformer") } t.providers = make(map[string]GraphNodeProvider) t.proxiable = make(map[string]bool) // Start the transformation process if err := t.transform(g, t.Module); err != nil { return err } // finally attach the configs to the new nodes return t.attachProviderConfigs(g) } func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { // If no config, do nothing if m == nil { return nil } // Add our resources if err := t.transformSingle(g, m); err != nil { return err } // Transform all the children. 
for _, c := range m.Children() { if err := t.transform(g, c); err != nil { return err } } return nil } func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) // Get the configuration for this module conf := m.Config() // Build the path we're at path := m.Path() if len(path) > 0 { path = append([]string{RootModuleName}, path...) } // add all providers from the configuration for _, p := range conf.ProviderConfigs { name := p.Name if p.Alias != "" { name += "." + p.Alias } v := t.Concrete(&NodeAbstractProvider{ NameValue: name, PathValue: path, }) // Add it to the graph g.Add(v) fullName := ResolveProviderName(name, path) t.providers[fullName] = v.(GraphNodeProvider) t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 } // Now replace the provider nodes with proxy nodes if a provider was being // passed in, and create implicit proxies if there was no config. Any extra // proxies will be removed in the prune step. return t.addProxyProviders(g, m) } func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { path := m.Path() // can't add proxies at the root if len(path) == 0 { return nil } parentPath := path[:len(path)-1] parent := t.Module.Child(parentPath) if parent == nil { return nil } var parentCfg *config.Module for _, mod := range parent.Config().Modules { if mod.Name == m.Name() { parentCfg = mod break } } if parentCfg == nil { // this can't really happen during normal execution. return fmt.Errorf("parent module config not found for %s", m.Name()) } // Go through all the providers the parent is passing in, and add proxies to // the parent provider nodes. 
for name, parentName := range parentCfg.Providers { fullName := ResolveProviderName(name, path) fullParentName := ResolveProviderName(parentName, parentPath) parentProvider := t.providers[fullParentName] if parentProvider == nil { return fmt.Errorf("missing provider %s", fullParentName) } proxy := &graphNodeProxyProvider{ nameValue: name, path: path, target: parentProvider, } concreteProvider := t.providers[fullName] // replace the concrete node with the provider passed in if concreteProvider != nil && t.proxiable[fullName] { g.Replace(concreteProvider, proxy) t.providers[fullName] = proxy continue } // aliased providers can't be implicitly passed in if strings.Contains(name, ".") { continue } // There was no concrete provider, so add this as an implicit provider. // The extra proxy will be pruned later if it's unused. g.Add(proxy) t.providers[fullName] = proxy } return nil } func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { for _, v := range g.Vertices() { // Only care about GraphNodeAttachProvider implementations apn, ok := v.(GraphNodeAttachProvider) if !ok { continue } // Determine what we're looking for path := normalizeModulePath(apn.Path())[1:] name := apn.ProviderName() log.Printf("[TRACE] Attach provider request: %#v %s", path, name) // Get the configuration. tree := t.Module.Child(path) if tree == nil { continue } // Go through the provider configs to find the matching config for _, p := range tree.Config().ProviderConfigs { // Build the name, which is "name.alias" if an alias exists current := p.Name if p.Alias != "" { current += "." + p.Alias } // If the configs match then attach! if current == name { log.Printf("[TRACE] Attaching provider config: %#v", p) apn.AttachProvider(p) break } } } return nil }
{ g.Remove(p) target = p.Target() key = target.(GraphNodeProvider).Name() }
conditional_block
transform_provider.go
package terraform import ( "errors" "fmt" "log" "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { return GraphTransformMulti( // Add providers from the config &ProviderConfigTransformer{ Module: mod, Providers: providers, Concrete: concrete, }, // Add any remaining missing providers &MissingProviderTransformer{ Providers: providers, Concrete: concrete, }, // Connect the providers &ProviderTransformer{}, // Remove unused providers and proxies &PruneProviderTransformer{}, // Connect provider to their parent provider nodes &ParentProviderTransformer{}, ) } // GraphNodeProvider is an interface that nodes that can be a provider // must implement. // ProviderName returns the name of the provider this satisfies. // Name returns the full name of the provider in the config. type GraphNodeProvider interface { ProviderName() string Name() string } // GraphNodeCloseProvider is an interface that nodes that can be a close // provider must implement. The CloseProviderName returned is the name of // the provider they satisfy. type GraphNodeCloseProvider interface { CloseProviderName() string } // GraphNodeProviderConsumer is an interface that nodes that require // a provider must implement. ProvidedBy must return the name of the provider // to use. This may be a provider by type, type.alias or a fully resolved // provider name type GraphNodeProviderConsumer interface { ProvidedBy() string // Set the resolved provider address for this resource. SetProvider(string) } // ProviderTransformer is a GraphTransformer that maps resources to // providers within the graph. This will error if there are any resources // that don't map to proper resources. 
type ProviderTransformer struct{} func (t *ProviderTransformer) Transform(g *Graph) error { // Go through the other nodes and match them to providers they need var err error m := providerVertexMap(g) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProviderConsumer); ok { p := pv.ProvidedBy() key := providerMapKey(p, pv) target := m[key] sp, ok := pv.(GraphNodeSubPath) if !ok && target == nil { // no target, and no path to walk up err = multierror.Append(err, fmt.Errorf( "%s: provider %s couldn't be found", dag.VertexName(v), p)) break } // if we don't have a provider at this level, walk up the path looking for one for i := 1; target == nil; i++ { path := normalizeModulePath(sp.Path()) if len(path) < i { break } key = ResolveProviderName(p, path[:len(path)-i]) target = m[key] if target != nil { break } } if target == nil { err = multierror.Append(err, fmt.Errorf( "%s: configuration for %s is not present; a provider configuration block is required for all operations", dag.VertexName(v), p, )) break } // see if this in an inherited provider if p, ok := target.(*graphNodeProxyProvider); ok { g.Remove(p) target = p.Target() key = target.(GraphNodeProvider).Name() } log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) pv.SetProvider(key) g.Connect(dag.BasicEdge(v, target)) } } return err } // CloseProviderTransformer is a GraphTransformer that adds nodes to the // graph that will close open provider connections that aren't needed anymore. // A provider connection is not needed anymore once all depended resources // in the graph are evaluated. 
type CloseProviderTransformer struct{} func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) cpm := make(map[string]*graphNodeCloseProvider) var err error for _, v := range pm { p := v.(GraphNodeProvider) // get the close provider of this type if we alread created it closer := cpm[p.ProviderName()] if closer == nil { // create a closer for this provider type closer = &graphNodeCloseProvider{ProviderNameValue: p.ProviderName()} g.Add(closer) cpm[p.ProviderName()] = closer } // Close node depends on the provider itself // this is added unconditionally, so it will connect to all instances // of the provider. Extra edges will be removed by transitive // reduction. g.Connect(dag.BasicEdge(closer, p)) // connect all the provider's resources to the close node for _, s := range g.UpEdges(p).List() { if _, ok := s.(GraphNodeProviderConsumer); ok { g.Connect(dag.BasicEdge(closer, s)) } } } return err } // MissingProviderTransformer is a GraphTransformer that adds nodes for all // required providers into the graph. Specifically, it creates provider // configuration nodes for all the providers that we support. These are pruned // later during an optimization pass. type MissingProviderTransformer struct { // Providers is the list of providers we support. Providers []string // Concrete, if set, overrides how the providers are made. Concrete ConcreteProviderNodeFunc } func (t *MissingProviderTransformer) Transform(g *Graph) error { // Initialize factory if t.Concrete == nil { t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { return a } } var err error m := providerVertexMap(g) for _, v := range g.Vertices() { pv, ok := v.(GraphNodeProviderConsumer) if !ok { continue } p := pv.ProvidedBy() // this may be the resolved provider from the state, so we need to get // the base provider name.
parts := strings.SplitAfter(p, "provider.") p = parts[len(parts)-1] key := ResolveProviderName(p, nil) provider := m[key] // we already have it if provider != nil { continue } // we don't implicitly create aliased providers if strings.Contains(p, ".") { log.Println("[DEBUG] not adding missing provider alias:", p) continue } log.Println("[DEBUG] adding missing provider:", p) // create the misisng top-level provider provider = t.Concrete(&NodeAbstractProvider{ NameValue: p, }).(dag.Vertex) m[key] = g.Add(provider) } return err } // ParentProviderTransformer connects provider nodes to their parents. // // This works by finding nodes that are both GraphNodeProviders and // GraphNodeSubPath. It then connects the providers to their parent // path. The parent provider is always at the root level. type ParentProviderTransformer struct{} func (t *ParentProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) for _, v := range g.Vertices() { // Only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // Also require a subpath, if there is no subpath then we // can't have a parent. if pn, ok := v.(GraphNodeSubPath); ok { if len(normalizeModulePath(pn.Path())) <= 1 { continue } } // this provider may be disabled, but we can only get it's name from // the ProviderName string name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) parent := pm[name] if parent != nil { g.Connect(dag.BasicEdge(v, parent)) } } return nil } // PruneProviderTransformer removes any providers that are not actually used by // anything, and provider proxies. This avoids the provider being initialized // and configured. This both saves resources but also avoids errors since // configuration may imply initialization which may require auth. 
type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { // We only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { continue } // ProxyProviders will have up edges, but we're now done with them in the graph if _, ok := v.(*graphNodeProxyProvider); ok { log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) g.Remove(v) } // Remove providers with no dependencies. if g.UpEdges(v).Len() == 0 { log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) g.Remove(v) } } return nil } // providerMapKey is a helper that gives us the key to use for the // maps returned by things such as providerVertexMap. func providerMapKey(k string, v dag.Vertex) string { if strings.Contains(k, "provider.") { // this is already resolved return k } // we create a dummy provider to var path []string if sp, ok := v.(GraphNodeSubPath); ok { path = normalizeModulePath(sp.Path()) } return ResolveProviderName(k, path) } func providerVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { // TODO: The Name may have meta info, like " (disabled)" name := strings.SplitN(pv.Name(), " ", 2)[0] m[name] = v } } return m } func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeCloseProvider); ok { m[pv.CloseProviderName()] = v } } return m } type graphNodeCloseProvider struct { ProviderNameValue string } func (n *graphNodeCloseProvider) Name() string { return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) } // GraphNodeEvalable impl. func (n *graphNodeCloseProvider) EvalTree() EvalNode { return CloseProviderEvalTree(n.ProviderNameValue) } // GraphNodeDependable impl. 
func (n *graphNodeCloseProvider) DependableName() []string { return []string{n.Name()} } func (n *graphNodeCloseProvider) CloseProviderName() string { return n.ProviderNameValue } // GraphNodeDotter impl. func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { if !opts.Verbose { return nil } return &dag.DotNode{ Name: name, Attrs: map[string]string{ "label": n.Name(), "shape": "diamond", }, } } // RemovableIfNotTargeted func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { // We need to add this so that this node will be removed if // it isn't targeted or a dependency of a target. return true } // graphNodeProxyProvider is a GraphNodeProvider implementation that is used to // store the name and value of a provider node for inheritance between modules. // These nodes are only used to store the data while loading the provider // configurations, and are removed after all the resources have been connected // to their providers. type graphNodeProxyProvider struct { nameValue string path []string target GraphNodeProvider } func (n *graphNodeProxyProvider) ProviderName() string { return n.Target().ProviderName() } func (n *graphNodeProxyProvider) Name() string { return ResolveProviderName(n.nameValue, n.path) } // find the concrete provider instance func (n *graphNodeProxyProvider) Target() GraphNodeProvider { switch t := n.target.(type) { case *graphNodeProxyProvider: return t.Target() default: return n.target } } // ProviderConfigTransformer adds all provider nodes from the configuration and // attaches the configs. type ProviderConfigTransformer struct { Providers []string Concrete ConcreteProviderNodeFunc // each provider node is stored here so that the proxy nodes can look up // their targets by name. providers map[string]GraphNodeProvider // record providers that can be overriden with a proxy proxiable map[string]bool // Module is the module to add resources from. 
Module *module.Tree } func (t *ProviderConfigTransformer) Transform(g *Graph) error { // If no module is given, we don't do anything if t.Module == nil { return nil } // If the module isn't loaded, that is simply an error if !t.Module.Loaded() { return errors.New("module must be loaded for ProviderConfigTransformer") } t.providers = make(map[string]GraphNodeProvider) t.proxiable = make(map[string]bool) // Start the transformation process if err := t.transform(g, t.Module); err != nil { return err } // finally attach the configs to the new nodes return t.attachProviderConfigs(g) } func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { // If no config, do nothing if m == nil { return nil } // Add our resources if err := t.transformSingle(g, m); err != nil { return err } // Transform all the children. for _, c := range m.Children() { if err := t.transform(g, c); err != nil { return err } } return nil } func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) // Get the configuration for this module conf := m.Config() // Build the path we're at path := m.Path() if len(path) > 0 { path = append([]string{RootModuleName}, path...) } // add all providers from the configuration for _, p := range conf.ProviderConfigs { name := p.Name if p.Alias != "" { name += "." + p.Alias } v := t.Concrete(&NodeAbstractProvider{ NameValue: name, PathValue: path, }) // Add it to the graph g.Add(v) fullName := ResolveProviderName(name, path) t.providers[fullName] = v.(GraphNodeProvider) t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 } // Now replace the provider nodes with proxy nodes if a provider was being // passed in, and create implicit proxies if there was no config. Any extra // proxies will be removed in the prune step. 
return t.addProxyProviders(g, m) } func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { path := m.Path() // can't add proxies at the root if len(path) == 0 { return nil } parentPath := path[:len(path)-1] parent := t.Module.Child(parentPath) if parent == nil { return nil } var parentCfg *config.Module for _, mod := range parent.Config().Modules { if mod.Name == m.Name() { parentCfg = mod break } } if parentCfg == nil { // this can't really happen during normal execution. return fmt.Errorf("parent module config not found for %s", m.Name()) } // Go through all the providers the parent is passing in, and add proxies to // the parent provider nodes. for name, parentName := range parentCfg.Providers { fullName := ResolveProviderName(name, path) fullParentName := ResolveProviderName(parentName, parentPath) parentProvider := t.providers[fullParentName] if parentProvider == nil { return fmt.Errorf("missing provider %s", fullParentName) } proxy := &graphNodeProxyProvider{ nameValue: name, path: path, target: parentProvider, } concreteProvider := t.providers[fullName] // replace the concrete node with the provider passed in if concreteProvider != nil && t.proxiable[fullName] { g.Replace(concreteProvider, proxy) t.providers[fullName] = proxy continue } // aliased providers can't be implicitly passed in if strings.Contains(name, ".") { continue } // There was no concrete provider, so add this as an implicit provider. // The extra proxy will be pruned later if it's unused. 
g.Add(proxy) t.providers[fullName] = proxy } return nil } func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { for _, v := range g.Vertices() { // Only care about GraphNodeAttachProvider implementations apn, ok := v.(GraphNodeAttachProvider) if !ok { continue } // Determine what we're looking for path := normalizeModulePath(apn.Path())[1:] name := apn.ProviderName() log.Printf("[TRACE] Attach provider request: %#v %s", path, name) // Get the configuration. tree := t.Module.Child(path) if tree == nil { continue } // Go through the provider configs to find the matching config for _, p := range tree.Config().ProviderConfigs { // Build the name, which is "name.alias" if an alias exists current := p.Name if p.Alias != "" { current += "." + p.Alias } // If the configs match then attach! if current == name { log.Printf("[TRACE] Attaching provider config: %#v", p) apn.AttachProvider(p) break } } } return nil }
random_line_split
predata_externals.go
package backup /* * This file contains structs and functions related to backing up metadata on the * coordinator for objects that connect to external data (external tables and external * protocols). */ import ( "database/sql" "fmt" "strings" "github.com/greenplum-db/gpbackup/toc" "github.com/greenplum-db/gpbackup/utils" ) const ( // Type of external table READABLE = iota READABLE_WEB WRITABLE WRITABLE_WEB // Protocol external table is using FILE GPFDIST GPHDFS HTTP S3 ) type ExternalTableDefinition struct { Oid uint32 Type int Protocol int Location sql.NullString ExecLocation string FormatType string FormatOpts string Command string RejectLimit int RejectLimitType string ErrTableName string ErrTableSchema string LogErrors bool LogErrPersist bool Encoding string Writable bool URIs []string } func PrintExternalTableCreateStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, table Table) { start := metadataFile.ByteCount tableTypeStrMap := map[int]string{ READABLE: "READABLE EXTERNAL", READABLE_WEB: "READABLE EXTERNAL WEB", WRITABLE: "WRITABLE EXTERNAL", WRITABLE_WEB: "WRITABLE EXTERNAL WEB", } extTableDef := table.ExtTableDef extTableDef.Type, extTableDef.Protocol = DetermineExternalTableCharacteristics(extTableDef) metadataFile.MustPrintf("\n\nCREATE %s TABLE %s (\n", tableTypeStrMap[extTableDef.Type], table.FQN()) printColumnDefinitions(metadataFile, table.ColumnDefs, "") metadataFile.MustPrintf(") ") PrintExternalTableStatements(metadataFile, table.FQN(), extTableDef) if extTableDef.Writable { metadataFile.MustPrintf("\n%s", table.DistPolicy) } metadataFile.MustPrintf(";") if toc != nil { section, entry := table.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } } func DetermineExternalTableCharacteristics(extTableDef ExternalTableDefinition) (int, int) { isWritable := extTableDef.Writable var tableType int tableProtocol := -1 if !extTableDef.Location.Valid || extTableDef.Location.String == "" { // EXTERNAL WEB tables 
may have EXECUTE instead of LOCATION tableProtocol = HTTP if isWritable { tableType = WRITABLE_WEB } else { tableType = READABLE_WEB } } else { /* * All data sources must use the same protocol, so we can use Location to determine * the table's protocol even though it only holds one data source URI. */ isWeb := strings.HasPrefix(extTableDef.Location.String, "http") if isWeb && isWritable { tableType = WRITABLE_WEB } else if isWeb && !isWritable { tableType = READABLE_WEB } else if !isWeb && isWritable { tableType = WRITABLE } else { tableType = READABLE } prefix := extTableDef.Location.String[0:strings.Index(extTableDef.Location.String, "://")] switch prefix { case "file": tableProtocol = FILE case "gpfdist": tableProtocol = GPFDIST case "gpfdists": tableProtocol = GPFDIST case "gphdfs": tableProtocol = GPHDFS case "http": tableProtocol = HTTP case "https": tableProtocol = HTTP case "s3": tableProtocol = S3 } } return tableType, tableProtocol } func generateExecuteStatement(extTableDef ExternalTableDefinition) string { var executeStatement string extTableDef.Command = strings.Replace(extTableDef.Command, `'`, `''`, -1) executeStatement += fmt.Sprintf("EXECUTE '%s'", extTableDef.Command) execType := strings.Split(extTableDef.ExecLocation, ":") switch execType[0] { case "ALL_SEGMENTS": // Default case, don't print anything else case "HOST": executeStatement += fmt.Sprintf(" ON HOST '%s'", execType[1]) case "MASTER_ONLY": executeStatement += " ON MASTER" case "COORDINATOR_ONLY": executeStatement += " ON COORDINATOR" case "PER_HOST": executeStatement += " ON HOST" case "SEGMENT_ID": executeStatement += fmt.Sprintf(" ON SEGMENT %s", execType[1]) case "TOTAL_SEGS": executeStatement += fmt.Sprintf(" ON %s", execType[1]) } return executeStatement } /* * This function is adapted from dumputils.c * * Escape backslashes and apostrophes in EXTERNAL TABLE format strings. 
* Returns a list of unquoted keyword and escaped quoted string tokens * * The fmtopts field of a pg_exttable tuple has an odd encoding -- it is * partially parsed and contains "string" values that aren't legal SQL. * Each string value is delimited by apostrophes and is usually, but not * always, a single character. The fmtopts field is typically something * like {delimiter '\x09' null '\N' escape '\'} or * {delimiter ',' null '' escape '\' quote '''}. Each backslash and * apostrophe in a string must be escaped and each string must be * prepended with an 'E' denoting an "escape syntax" string. * * Usage note: A field value containing an apostrophe followed by a space * will throw this algorithm off -- it presumes no embedded spaces. */ func
(formatOpts string) []string { inString := false resultList := make([]string, 0) currString := "" for i := 0; i < len(formatOpts); i++ { switch formatOpts[i] { case '\'': if inString { /* * Escape apostrophes *within* the string. If the * apostrophe is at the end of the source string or is * followed by a space, it is presumed to be a closing * apostrophe and is not escaped. */ if (i+1) == len(formatOpts) || formatOpts[i+1] == ' ' { inString = false } else { currString += "\\" } } else { currString = "E" inString = true } case '\\': currString += "\\" case ' ': if !inString { resultList = append(resultList, currString) currString = "" continue } } currString += string(formatOpts[i]) } resultList = append(resultList, currString) return resultList } /* * Format options to use `a = b` format because this format is required * when using CUSTOM format. * * We do this for CUSTOM, AVRO and PARQUET, but not CSV or TEXT because * CSV and TEXT have some multi-word options that are difficult * to parse into this format */ func makeCustomFormatOpts(tokens []string) string { var key string var value string resultOpts := make([]string, 0) for i := 0; i < len(tokens)-1; i += 2 { key = tokens[i] value = tokens[i+1] opt := fmt.Sprintf(`%s = %s`, key, value) resultOpts = append(resultOpts, opt) } return strings.Join(resultOpts, ", ") } func GenerateFormatStatement(extTableDef ExternalTableDefinition) string { var formatStatement string formatType := "" switch extTableDef.FormatType { case "t": formatType = "TEXT" case "c": formatType = "CSV" case "b": formatType = "CUSTOM" case "a": formatType = "AVRO" case "p": formatType = "PARQUET" } formatStatement += fmt.Sprintf("FORMAT '%s'", formatType) if extTableDef.FormatOpts != "" { formatTokens := tokenizeAndEscapeFormatOpts(strings.TrimSpace(extTableDef.FormatOpts)) formatOptsString := "" if formatType == "TEXT" || formatType == "CSV" { formatOptsString = strings.Join(formatTokens, " ") } else { formatOptsString = 
makeCustomFormatOpts(formatTokens) } formatStatement += fmt.Sprintf(" (%s)", formatOptsString) } return formatStatement } func generateLogErrorStatement(extTableDef ExternalTableDefinition) string { logErrorStatement := "" if extTableDef.LogErrors { if extTableDef.LogErrPersist { logErrorStatement += "\nLOG ERRORS PERSISTENTLY" } else { logErrorStatement += "\nLOG ERRORS" } } else if extTableDef.ErrTableName != "" && extTableDef.ErrTableSchema != "" { errTableFQN := utils.MakeFQN(extTableDef.ErrTableSchema, extTableDef.ErrTableName) logErrorStatement += fmt.Sprintf("\nLOG ERRORS INTO %s", errTableFQN) } if extTableDef.RejectLimit != 0 { logErrorStatement += fmt.Sprintf("\nSEGMENT REJECT LIMIT %d ", extTableDef.RejectLimit) switch extTableDef.RejectLimitType { case "r": logErrorStatement += "ROWS" case "p": logErrorStatement += "PERCENT" } } return logErrorStatement } func PrintExternalTableStatements(metadataFile *utils.FileWithByteCount, tableName string, extTableDef ExternalTableDefinition) { if extTableDef.Type != WRITABLE_WEB { if len(extTableDef.URIs) > 0 { metadataFile.MustPrintf("LOCATION (\n\t'%s'\n)", strings.Join(extTableDef.URIs, "',\n\t'")) } } if extTableDef.Type == READABLE || (extTableDef.Type == WRITABLE_WEB && extTableDef.Protocol == S3) { if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } else if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } } if extTableDef.Type == READABLE_WEB || extTableDef.Type == WRITABLE_WEB { if extTableDef.Command != "" { metadataFile.MustPrint(generateExecuteStatement(extTableDef)) } } metadataFile.MustPrintln() metadataFile.MustPrint(GenerateFormatStatement(extTableDef)) metadataFile.MustPrintln() metadataFile.MustPrintf("ENCODING '%s'", extTableDef.Encoding) if extTableDef.Type == READABLE || extTableDef.Type == READABLE_WEB { metadataFile.MustPrint(generateLogErrorStatement(extTableDef)) } } func 
PrintCreateExternalProtocolStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, protocol ExternalProtocol, funcInfoMap map[uint32]FunctionInfo, protoMetadata ObjectMetadata) { start := metadataFile.ByteCount funcOidList := []uint32{protocol.ReadFunction, protocol.WriteFunction, protocol.Validator} hasUserDefinedFunc := false for _, funcOid := range funcOidList { if funcInfo, ok := funcInfoMap[funcOid]; ok && !funcInfo.IsInternal { hasUserDefinedFunc = true } } if hasUserDefinedFunc { protocolFunctions := make([]string, 0) if protocol.ReadFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("readfunc = %s", funcInfoMap[protocol.ReadFunction].QualifiedName)) } if protocol.WriteFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("writefunc = %s", funcInfoMap[protocol.WriteFunction].QualifiedName)) } if protocol.Validator != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("validatorfunc = %s", funcInfoMap[protocol.Validator].QualifiedName)) } metadataFile.MustPrintf("\n\nCREATE ") if protocol.Trusted { metadataFile.MustPrintf("TRUSTED ") } metadataFile.MustPrintf("PROTOCOL %s (%s);\n", protocol.Name, strings.Join(protocolFunctions, ", ")) section, entry := protocol.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } PrintObjectMetadata(metadataFile, toc, protoMetadata, protocol, "") } func PrintExchangeExternalPartitionStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, extPartitions []PartitionInfo, partInfoMap map[uint32]PartitionInfo, tables []Table) { tableNameMap := make(map[uint32]string, len(tables)) for _, table := range tables { tableNameMap[table.Oid] = table.FQN() } for _, externalPartition := range extPartitions { extPartRelationName := tableNameMap[externalPartition.RelationOid] if extPartRelationName == "" { continue //Not included in the list of tables to back up } parentRelationName := utils.MakeFQN(externalPartition.ParentSchema, 
externalPartition.ParentRelationName) start := metadataFile.ByteCount alterPartitionStr := "" currentPartition := externalPartition for currentPartition.PartitionParentRuleOid != 0 { parent := partInfoMap[currentPartition.PartitionParentRuleOid] if parent.PartitionName == "" { alterPartitionStr = fmt.Sprintf("ALTER PARTITION FOR (RANK(%d)) ", parent.PartitionRank) + alterPartitionStr } else { alterPartitionStr = fmt.Sprintf("ALTER PARTITION %s ", parent.PartitionName) + alterPartitionStr } currentPartition = parent } metadataFile.MustPrintf("\n\nALTER TABLE %s %s", parentRelationName, alterPartitionStr) if externalPartition.PartitionName == "" { metadataFile.MustPrintf("EXCHANGE PARTITION FOR (RANK(%d)) ", externalPartition.PartitionRank) } else { metadataFile.MustPrintf("EXCHANGE PARTITION %s ", externalPartition.PartitionName) } metadataFile.MustPrintf("WITH TABLE %s WITHOUT VALIDATION;", extPartRelationName) metadataFile.MustPrintf("\n\nDROP TABLE %s;", extPartRelationName) section, entry := externalPartition.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } }
tokenizeAndEscapeFormatOpts
identifier_name
predata_externals.go
package backup /* * This file contains structs and functions related to backing up metadata on the * coordinator for objects that connect to external data (external tables and external * protocols). */ import ( "database/sql" "fmt" "strings" "github.com/greenplum-db/gpbackup/toc" "github.com/greenplum-db/gpbackup/utils" ) const ( // Type of external table READABLE = iota READABLE_WEB WRITABLE WRITABLE_WEB // Protocol external table is using FILE GPFDIST GPHDFS HTTP S3 ) type ExternalTableDefinition struct { Oid uint32 Type int Protocol int Location sql.NullString ExecLocation string FormatType string FormatOpts string Command string RejectLimit int RejectLimitType string ErrTableName string ErrTableSchema string LogErrors bool LogErrPersist bool Encoding string Writable bool URIs []string } func PrintExternalTableCreateStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, table Table) { start := metadataFile.ByteCount tableTypeStrMap := map[int]string{ READABLE: "READABLE EXTERNAL", READABLE_WEB: "READABLE EXTERNAL WEB", WRITABLE: "WRITABLE EXTERNAL", WRITABLE_WEB: "WRITABLE EXTERNAL WEB", } extTableDef := table.ExtTableDef extTableDef.Type, extTableDef.Protocol = DetermineExternalTableCharacteristics(extTableDef) metadataFile.MustPrintf("\n\nCREATE %s TABLE %s (\n", tableTypeStrMap[extTableDef.Type], table.FQN()) printColumnDefinitions(metadataFile, table.ColumnDefs, "") metadataFile.MustPrintf(") ") PrintExternalTableStatements(metadataFile, table.FQN(), extTableDef) if extTableDef.Writable { metadataFile.MustPrintf("\n%s", table.DistPolicy) } metadataFile.MustPrintf(";") if toc != nil { section, entry := table.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } } func DetermineExternalTableCharacteristics(extTableDef ExternalTableDefinition) (int, int) { isWritable := extTableDef.Writable var tableType int tableProtocol := -1 if !extTableDef.Location.Valid || extTableDef.Location.String == "" { // EXTERNAL WEB tables 
may have EXECUTE instead of LOCATION tableProtocol = HTTP if isWritable { tableType = WRITABLE_WEB } else { tableType = READABLE_WEB } } else { /* * All data sources must use the same protocol, so we can use Location to determine * the table's protocol even though it only holds one data source URI. */ isWeb := strings.HasPrefix(extTableDef.Location.String, "http") if isWeb && isWritable { tableType = WRITABLE_WEB } else if isWeb && !isWritable { tableType = READABLE_WEB } else if !isWeb && isWritable { tableType = WRITABLE } else { tableType = READABLE } prefix := extTableDef.Location.String[0:strings.Index(extTableDef.Location.String, "://")] switch prefix { case "file": tableProtocol = FILE case "gpfdist": tableProtocol = GPFDIST case "gpfdists": tableProtocol = GPFDIST case "gphdfs": tableProtocol = GPHDFS case "http": tableProtocol = HTTP case "https": tableProtocol = HTTP case "s3": tableProtocol = S3 } } return tableType, tableProtocol } func generateExecuteStatement(extTableDef ExternalTableDefinition) string { var executeStatement string extTableDef.Command = strings.Replace(extTableDef.Command, `'`, `''`, -1) executeStatement += fmt.Sprintf("EXECUTE '%s'", extTableDef.Command) execType := strings.Split(extTableDef.ExecLocation, ":") switch execType[0] { case "ALL_SEGMENTS": // Default case, don't print anything else case "HOST": executeStatement += fmt.Sprintf(" ON HOST '%s'", execType[1]) case "MASTER_ONLY": executeStatement += " ON MASTER" case "COORDINATOR_ONLY": executeStatement += " ON COORDINATOR" case "PER_HOST": executeStatement += " ON HOST" case "SEGMENT_ID": executeStatement += fmt.Sprintf(" ON SEGMENT %s", execType[1]) case "TOTAL_SEGS": executeStatement += fmt.Sprintf(" ON %s", execType[1]) } return executeStatement } /* * This function is adapted from dumputils.c * * Escape backslashes and apostrophes in EXTERNAL TABLE format strings. 
* Returns a list of unquoted keyword and escaped quoted string tokens * * The fmtopts field of a pg_exttable tuple has an odd encoding -- it is * partially parsed and contains "string" values that aren't legal SQL. * Each string value is delimited by apostrophes and is usually, but not * always, a single character. The fmtopts field is typically something * like {delimiter '\x09' null '\N' escape '\'} or * {delimiter ',' null '' escape '\' quote '''}. Each backslash and * apostrophe in a string must be escaped and each string must be * prepended with an 'E' denoting an "escape syntax" string. * * Usage note: A field value containing an apostrophe followed by a space * will throw this algorithm off -- it presumes no embedded spaces. */ func tokenizeAndEscapeFormatOpts(formatOpts string) []string { inString := false resultList := make([]string, 0) currString := "" for i := 0; i < len(formatOpts); i++ { switch formatOpts[i] { case '\'': if inString { /* * Escape apostrophes *within* the string. If the * apostrophe is at the end of the source string or is * followed by a space, it is presumed to be a closing * apostrophe and is not escaped. */ if (i+1) == len(formatOpts) || formatOpts[i+1] == ' ' { inString = false } else { currString += "\\" } } else { currString = "E" inString = true } case '\\': currString += "\\" case ' ': if !inString { resultList = append(resultList, currString) currString = "" continue } } currString += string(formatOpts[i]) } resultList = append(resultList, currString) return resultList } /* * Format options to use `a = b` format because this format is required * when using CUSTOM format. 
* * We do this for CUSTOM, AVRO and PARQUET, but not CSV or TEXT because * CSV and TEXT have some multi-word options that are difficult * to parse into this format */ func makeCustomFormatOpts(tokens []string) string { var key string var value string resultOpts := make([]string, 0) for i := 0; i < len(tokens)-1; i += 2 { key = tokens[i] value = tokens[i+1] opt := fmt.Sprintf(`%s = %s`, key, value) resultOpts = append(resultOpts, opt) } return strings.Join(resultOpts, ", ") } func GenerateFormatStatement(extTableDef ExternalTableDefinition) string { var formatStatement string formatType := "" switch extTableDef.FormatType { case "t": formatType = "TEXT" case "c": formatType = "CSV" case "b": formatType = "CUSTOM" case "a": formatType = "AVRO" case "p": formatType = "PARQUET" } formatStatement += fmt.Sprintf("FORMAT '%s'", formatType) if extTableDef.FormatOpts != "" { formatTokens := tokenizeAndEscapeFormatOpts(strings.TrimSpace(extTableDef.FormatOpts)) formatOptsString := "" if formatType == "TEXT" || formatType == "CSV" { formatOptsString = strings.Join(formatTokens, " ") } else { formatOptsString = makeCustomFormatOpts(formatTokens) } formatStatement += fmt.Sprintf(" (%s)", formatOptsString) } return formatStatement } func generateLogErrorStatement(extTableDef ExternalTableDefinition) string { logErrorStatement := "" if extTableDef.LogErrors { if extTableDef.LogErrPersist { logErrorStatement += "\nLOG ERRORS PERSISTENTLY" } else { logErrorStatement += "\nLOG ERRORS" } } else if extTableDef.ErrTableName != "" && extTableDef.ErrTableSchema != "" { errTableFQN := utils.MakeFQN(extTableDef.ErrTableSchema, extTableDef.ErrTableName) logErrorStatement += fmt.Sprintf("\nLOG ERRORS INTO %s", errTableFQN) } if extTableDef.RejectLimit != 0 { logErrorStatement += fmt.Sprintf("\nSEGMENT REJECT LIMIT %d ", extTableDef.RejectLimit) switch extTableDef.RejectLimitType { case "r": logErrorStatement += "ROWS" case "p": logErrorStatement += "PERCENT" } } return logErrorStatement } 
func PrintExternalTableStatements(metadataFile *utils.FileWithByteCount, tableName string, extTableDef ExternalTableDefinition) { if extTableDef.Type != WRITABLE_WEB { if len(extTableDef.URIs) > 0 { metadataFile.MustPrintf("LOCATION (\n\t'%s'\n)", strings.Join(extTableDef.URIs, "',\n\t'")) } } if extTableDef.Type == READABLE || (extTableDef.Type == WRITABLE_WEB && extTableDef.Protocol == S3) { if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } else if extTableDef.ExecLocation == "COORDINATOR_ONLY"
} if extTableDef.Type == READABLE_WEB || extTableDef.Type == WRITABLE_WEB { if extTableDef.Command != "" { metadataFile.MustPrint(generateExecuteStatement(extTableDef)) } } metadataFile.MustPrintln() metadataFile.MustPrint(GenerateFormatStatement(extTableDef)) metadataFile.MustPrintln() metadataFile.MustPrintf("ENCODING '%s'", extTableDef.Encoding) if extTableDef.Type == READABLE || extTableDef.Type == READABLE_WEB { metadataFile.MustPrint(generateLogErrorStatement(extTableDef)) } } func PrintCreateExternalProtocolStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, protocol ExternalProtocol, funcInfoMap map[uint32]FunctionInfo, protoMetadata ObjectMetadata) { start := metadataFile.ByteCount funcOidList := []uint32{protocol.ReadFunction, protocol.WriteFunction, protocol.Validator} hasUserDefinedFunc := false for _, funcOid := range funcOidList { if funcInfo, ok := funcInfoMap[funcOid]; ok && !funcInfo.IsInternal { hasUserDefinedFunc = true } } if hasUserDefinedFunc { protocolFunctions := make([]string, 0) if protocol.ReadFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("readfunc = %s", funcInfoMap[protocol.ReadFunction].QualifiedName)) } if protocol.WriteFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("writefunc = %s", funcInfoMap[protocol.WriteFunction].QualifiedName)) } if protocol.Validator != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("validatorfunc = %s", funcInfoMap[protocol.Validator].QualifiedName)) } metadataFile.MustPrintf("\n\nCREATE ") if protocol.Trusted { metadataFile.MustPrintf("TRUSTED ") } metadataFile.MustPrintf("PROTOCOL %s (%s);\n", protocol.Name, strings.Join(protocolFunctions, ", ")) section, entry := protocol.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } PrintObjectMetadata(metadataFile, toc, protoMetadata, protocol, "") } func PrintExchangeExternalPartitionStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, 
extPartitions []PartitionInfo, partInfoMap map[uint32]PartitionInfo, tables []Table) { tableNameMap := make(map[uint32]string, len(tables)) for _, table := range tables { tableNameMap[table.Oid] = table.FQN() } for _, externalPartition := range extPartitions { extPartRelationName := tableNameMap[externalPartition.RelationOid] if extPartRelationName == "" { continue //Not included in the list of tables to back up } parentRelationName := utils.MakeFQN(externalPartition.ParentSchema, externalPartition.ParentRelationName) start := metadataFile.ByteCount alterPartitionStr := "" currentPartition := externalPartition for currentPartition.PartitionParentRuleOid != 0 { parent := partInfoMap[currentPartition.PartitionParentRuleOid] if parent.PartitionName == "" { alterPartitionStr = fmt.Sprintf("ALTER PARTITION FOR (RANK(%d)) ", parent.PartitionRank) + alterPartitionStr } else { alterPartitionStr = fmt.Sprintf("ALTER PARTITION %s ", parent.PartitionName) + alterPartitionStr } currentPartition = parent } metadataFile.MustPrintf("\n\nALTER TABLE %s %s", parentRelationName, alterPartitionStr) if externalPartition.PartitionName == "" { metadataFile.MustPrintf("EXCHANGE PARTITION FOR (RANK(%d)) ", externalPartition.PartitionRank) } else { metadataFile.MustPrintf("EXCHANGE PARTITION %s ", externalPartition.PartitionName) } metadataFile.MustPrintf("WITH TABLE %s WITHOUT VALIDATION;", extPartRelationName) metadataFile.MustPrintf("\n\nDROP TABLE %s;", extPartRelationName) section, entry := externalPartition.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } }
{ metadataFile.MustPrintf(" ON COORDINATOR") }
conditional_block
predata_externals.go
package backup /* * This file contains structs and functions related to backing up metadata on the * coordinator for objects that connect to external data (external tables and external * protocols). */ import ( "database/sql" "fmt" "strings" "github.com/greenplum-db/gpbackup/toc" "github.com/greenplum-db/gpbackup/utils" ) const ( // Type of external table READABLE = iota READABLE_WEB WRITABLE WRITABLE_WEB // Protocol external table is using FILE GPFDIST GPHDFS HTTP S3 ) type ExternalTableDefinition struct { Oid uint32 Type int Protocol int Location sql.NullString ExecLocation string FormatType string FormatOpts string Command string RejectLimit int RejectLimitType string ErrTableName string ErrTableSchema string LogErrors bool LogErrPersist bool Encoding string Writable bool URIs []string } func PrintExternalTableCreateStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, table Table)
func DetermineExternalTableCharacteristics(extTableDef ExternalTableDefinition) (int, int) { isWritable := extTableDef.Writable var tableType int tableProtocol := -1 if !extTableDef.Location.Valid || extTableDef.Location.String == "" { // EXTERNAL WEB tables may have EXECUTE instead of LOCATION tableProtocol = HTTP if isWritable { tableType = WRITABLE_WEB } else { tableType = READABLE_WEB } } else { /* * All data sources must use the same protocol, so we can use Location to determine * the table's protocol even though it only holds one data source URI. */ isWeb := strings.HasPrefix(extTableDef.Location.String, "http") if isWeb && isWritable { tableType = WRITABLE_WEB } else if isWeb && !isWritable { tableType = READABLE_WEB } else if !isWeb && isWritable { tableType = WRITABLE } else { tableType = READABLE } prefix := extTableDef.Location.String[0:strings.Index(extTableDef.Location.String, "://")] switch prefix { case "file": tableProtocol = FILE case "gpfdist": tableProtocol = GPFDIST case "gpfdists": tableProtocol = GPFDIST case "gphdfs": tableProtocol = GPHDFS case "http": tableProtocol = HTTP case "https": tableProtocol = HTTP case "s3": tableProtocol = S3 } } return tableType, tableProtocol } func generateExecuteStatement(extTableDef ExternalTableDefinition) string { var executeStatement string extTableDef.Command = strings.Replace(extTableDef.Command, `'`, `''`, -1) executeStatement += fmt.Sprintf("EXECUTE '%s'", extTableDef.Command) execType := strings.Split(extTableDef.ExecLocation, ":") switch execType[0] { case "ALL_SEGMENTS": // Default case, don't print anything else case "HOST": executeStatement += fmt.Sprintf(" ON HOST '%s'", execType[1]) case "MASTER_ONLY": executeStatement += " ON MASTER" case "COORDINATOR_ONLY": executeStatement += " ON COORDINATOR" case "PER_HOST": executeStatement += " ON HOST" case "SEGMENT_ID": executeStatement += fmt.Sprintf(" ON SEGMENT %s", execType[1]) case "TOTAL_SEGS": executeStatement += fmt.Sprintf(" ON %s", 
execType[1]) } return executeStatement } /* * This function is adapted from dumputils.c * * Escape backslashes and apostrophes in EXTERNAL TABLE format strings. * Returns a list of unquoted keyword and escaped quoted string tokens * * The fmtopts field of a pg_exttable tuple has an odd encoding -- it is * partially parsed and contains "string" values that aren't legal SQL. * Each string value is delimited by apostrophes and is usually, but not * always, a single character. The fmtopts field is typically something * like {delimiter '\x09' null '\N' escape '\'} or * {delimiter ',' null '' escape '\' quote '''}. Each backslash and * apostrophe in a string must be escaped and each string must be * prepended with an 'E' denoting an "escape syntax" string. * * Usage note: A field value containing an apostrophe followed by a space * will throw this algorithm off -- it presumes no embedded spaces. */ func tokenizeAndEscapeFormatOpts(formatOpts string) []string { inString := false resultList := make([]string, 0) currString := "" for i := 0; i < len(formatOpts); i++ { switch formatOpts[i] { case '\'': if inString { /* * Escape apostrophes *within* the string. If the * apostrophe is at the end of the source string or is * followed by a space, it is presumed to be a closing * apostrophe and is not escaped. */ if (i+1) == len(formatOpts) || formatOpts[i+1] == ' ' { inString = false } else { currString += "\\" } } else { currString = "E" inString = true } case '\\': currString += "\\" case ' ': if !inString { resultList = append(resultList, currString) currString = "" continue } } currString += string(formatOpts[i]) } resultList = append(resultList, currString) return resultList } /* * Format options to use `a = b` format because this format is required * when using CUSTOM format. 
* * We do this for CUSTOM, AVRO and PARQUET, but not CSV or TEXT because * CSV and TEXT have some multi-word options that are difficult * to parse into this format */ func makeCustomFormatOpts(tokens []string) string { var key string var value string resultOpts := make([]string, 0) for i := 0; i < len(tokens)-1; i += 2 { key = tokens[i] value = tokens[i+1] opt := fmt.Sprintf(`%s = %s`, key, value) resultOpts = append(resultOpts, opt) } return strings.Join(resultOpts, ", ") } func GenerateFormatStatement(extTableDef ExternalTableDefinition) string { var formatStatement string formatType := "" switch extTableDef.FormatType { case "t": formatType = "TEXT" case "c": formatType = "CSV" case "b": formatType = "CUSTOM" case "a": formatType = "AVRO" case "p": formatType = "PARQUET" } formatStatement += fmt.Sprintf("FORMAT '%s'", formatType) if extTableDef.FormatOpts != "" { formatTokens := tokenizeAndEscapeFormatOpts(strings.TrimSpace(extTableDef.FormatOpts)) formatOptsString := "" if formatType == "TEXT" || formatType == "CSV" { formatOptsString = strings.Join(formatTokens, " ") } else { formatOptsString = makeCustomFormatOpts(formatTokens) } formatStatement += fmt.Sprintf(" (%s)", formatOptsString) } return formatStatement } func generateLogErrorStatement(extTableDef ExternalTableDefinition) string { logErrorStatement := "" if extTableDef.LogErrors { if extTableDef.LogErrPersist { logErrorStatement += "\nLOG ERRORS PERSISTENTLY" } else { logErrorStatement += "\nLOG ERRORS" } } else if extTableDef.ErrTableName != "" && extTableDef.ErrTableSchema != "" { errTableFQN := utils.MakeFQN(extTableDef.ErrTableSchema, extTableDef.ErrTableName) logErrorStatement += fmt.Sprintf("\nLOG ERRORS INTO %s", errTableFQN) } if extTableDef.RejectLimit != 0 { logErrorStatement += fmt.Sprintf("\nSEGMENT REJECT LIMIT %d ", extTableDef.RejectLimit) switch extTableDef.RejectLimitType { case "r": logErrorStatement += "ROWS" case "p": logErrorStatement += "PERCENT" } } return logErrorStatement } 
func PrintExternalTableStatements(metadataFile *utils.FileWithByteCount, tableName string, extTableDef ExternalTableDefinition) { if extTableDef.Type != WRITABLE_WEB { if len(extTableDef.URIs) > 0 { metadataFile.MustPrintf("LOCATION (\n\t'%s'\n)", strings.Join(extTableDef.URIs, "',\n\t'")) } } if extTableDef.Type == READABLE || (extTableDef.Type == WRITABLE_WEB && extTableDef.Protocol == S3) { if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } else if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } } if extTableDef.Type == READABLE_WEB || extTableDef.Type == WRITABLE_WEB { if extTableDef.Command != "" { metadataFile.MustPrint(generateExecuteStatement(extTableDef)) } } metadataFile.MustPrintln() metadataFile.MustPrint(GenerateFormatStatement(extTableDef)) metadataFile.MustPrintln() metadataFile.MustPrintf("ENCODING '%s'", extTableDef.Encoding) if extTableDef.Type == READABLE || extTableDef.Type == READABLE_WEB { metadataFile.MustPrint(generateLogErrorStatement(extTableDef)) } } func PrintCreateExternalProtocolStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, protocol ExternalProtocol, funcInfoMap map[uint32]FunctionInfo, protoMetadata ObjectMetadata) { start := metadataFile.ByteCount funcOidList := []uint32{protocol.ReadFunction, protocol.WriteFunction, protocol.Validator} hasUserDefinedFunc := false for _, funcOid := range funcOidList { if funcInfo, ok := funcInfoMap[funcOid]; ok && !funcInfo.IsInternal { hasUserDefinedFunc = true } } if hasUserDefinedFunc { protocolFunctions := make([]string, 0) if protocol.ReadFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("readfunc = %s", funcInfoMap[protocol.ReadFunction].QualifiedName)) } if protocol.WriteFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("writefunc = %s", funcInfoMap[protocol.WriteFunction].QualifiedName)) } if protocol.Validator != 0 { protocolFunctions = 
append(protocolFunctions, fmt.Sprintf("validatorfunc = %s", funcInfoMap[protocol.Validator].QualifiedName)) } metadataFile.MustPrintf("\n\nCREATE ") if protocol.Trusted { metadataFile.MustPrintf("TRUSTED ") } metadataFile.MustPrintf("PROTOCOL %s (%s);\n", protocol.Name, strings.Join(protocolFunctions, ", ")) section, entry := protocol.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } PrintObjectMetadata(metadataFile, toc, protoMetadata, protocol, "") } func PrintExchangeExternalPartitionStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, extPartitions []PartitionInfo, partInfoMap map[uint32]PartitionInfo, tables []Table) { tableNameMap := make(map[uint32]string, len(tables)) for _, table := range tables { tableNameMap[table.Oid] = table.FQN() } for _, externalPartition := range extPartitions { extPartRelationName := tableNameMap[externalPartition.RelationOid] if extPartRelationName == "" { continue //Not included in the list of tables to back up } parentRelationName := utils.MakeFQN(externalPartition.ParentSchema, externalPartition.ParentRelationName) start := metadataFile.ByteCount alterPartitionStr := "" currentPartition := externalPartition for currentPartition.PartitionParentRuleOid != 0 { parent := partInfoMap[currentPartition.PartitionParentRuleOid] if parent.PartitionName == "" { alterPartitionStr = fmt.Sprintf("ALTER PARTITION FOR (RANK(%d)) ", parent.PartitionRank) + alterPartitionStr } else { alterPartitionStr = fmt.Sprintf("ALTER PARTITION %s ", parent.PartitionName) + alterPartitionStr } currentPartition = parent } metadataFile.MustPrintf("\n\nALTER TABLE %s %s", parentRelationName, alterPartitionStr) if externalPartition.PartitionName == "" { metadataFile.MustPrintf("EXCHANGE PARTITION FOR (RANK(%d)) ", externalPartition.PartitionRank) } else { metadataFile.MustPrintf("EXCHANGE PARTITION %s ", externalPartition.PartitionName) } metadataFile.MustPrintf("WITH TABLE %s WITHOUT VALIDATION;", 
extPartRelationName) metadataFile.MustPrintf("\n\nDROP TABLE %s;", extPartRelationName) section, entry := externalPartition.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } }
{ start := metadataFile.ByteCount tableTypeStrMap := map[int]string{ READABLE: "READABLE EXTERNAL", READABLE_WEB: "READABLE EXTERNAL WEB", WRITABLE: "WRITABLE EXTERNAL", WRITABLE_WEB: "WRITABLE EXTERNAL WEB", } extTableDef := table.ExtTableDef extTableDef.Type, extTableDef.Protocol = DetermineExternalTableCharacteristics(extTableDef) metadataFile.MustPrintf("\n\nCREATE %s TABLE %s (\n", tableTypeStrMap[extTableDef.Type], table.FQN()) printColumnDefinitions(metadataFile, table.ColumnDefs, "") metadataFile.MustPrintf(") ") PrintExternalTableStatements(metadataFile, table.FQN(), extTableDef) if extTableDef.Writable { metadataFile.MustPrintf("\n%s", table.DistPolicy) } metadataFile.MustPrintf(";") if toc != nil { section, entry := table.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } }
identifier_body
predata_externals.go
package backup /* * This file contains structs and functions related to backing up metadata on the * coordinator for objects that connect to external data (external tables and external * protocols). */
"github.com/greenplum-db/gpbackup/toc" "github.com/greenplum-db/gpbackup/utils" ) const ( // Type of external table READABLE = iota READABLE_WEB WRITABLE WRITABLE_WEB // Protocol external table is using FILE GPFDIST GPHDFS HTTP S3 ) type ExternalTableDefinition struct { Oid uint32 Type int Protocol int Location sql.NullString ExecLocation string FormatType string FormatOpts string Command string RejectLimit int RejectLimitType string ErrTableName string ErrTableSchema string LogErrors bool LogErrPersist bool Encoding string Writable bool URIs []string } func PrintExternalTableCreateStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, table Table) { start := metadataFile.ByteCount tableTypeStrMap := map[int]string{ READABLE: "READABLE EXTERNAL", READABLE_WEB: "READABLE EXTERNAL WEB", WRITABLE: "WRITABLE EXTERNAL", WRITABLE_WEB: "WRITABLE EXTERNAL WEB", } extTableDef := table.ExtTableDef extTableDef.Type, extTableDef.Protocol = DetermineExternalTableCharacteristics(extTableDef) metadataFile.MustPrintf("\n\nCREATE %s TABLE %s (\n", tableTypeStrMap[extTableDef.Type], table.FQN()) printColumnDefinitions(metadataFile, table.ColumnDefs, "") metadataFile.MustPrintf(") ") PrintExternalTableStatements(metadataFile, table.FQN(), extTableDef) if extTableDef.Writable { metadataFile.MustPrintf("\n%s", table.DistPolicy) } metadataFile.MustPrintf(";") if toc != nil { section, entry := table.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } } func DetermineExternalTableCharacteristics(extTableDef ExternalTableDefinition) (int, int) { isWritable := extTableDef.Writable var tableType int tableProtocol := -1 if !extTableDef.Location.Valid || extTableDef.Location.String == "" { // EXTERNAL WEB tables may have EXECUTE instead of LOCATION tableProtocol = HTTP if isWritable { tableType = WRITABLE_WEB } else { tableType = READABLE_WEB } } else { /* * All data sources must use the same protocol, so we can use Location to determine * the 
table's protocol even though it only holds one data source URI. */ isWeb := strings.HasPrefix(extTableDef.Location.String, "http") if isWeb && isWritable { tableType = WRITABLE_WEB } else if isWeb && !isWritable { tableType = READABLE_WEB } else if !isWeb && isWritable { tableType = WRITABLE } else { tableType = READABLE } prefix := extTableDef.Location.String[0:strings.Index(extTableDef.Location.String, "://")] switch prefix { case "file": tableProtocol = FILE case "gpfdist": tableProtocol = GPFDIST case "gpfdists": tableProtocol = GPFDIST case "gphdfs": tableProtocol = GPHDFS case "http": tableProtocol = HTTP case "https": tableProtocol = HTTP case "s3": tableProtocol = S3 } } return tableType, tableProtocol } func generateExecuteStatement(extTableDef ExternalTableDefinition) string { var executeStatement string extTableDef.Command = strings.Replace(extTableDef.Command, `'`, `''`, -1) executeStatement += fmt.Sprintf("EXECUTE '%s'", extTableDef.Command) execType := strings.Split(extTableDef.ExecLocation, ":") switch execType[0] { case "ALL_SEGMENTS": // Default case, don't print anything else case "HOST": executeStatement += fmt.Sprintf(" ON HOST '%s'", execType[1]) case "MASTER_ONLY": executeStatement += " ON MASTER" case "COORDINATOR_ONLY": executeStatement += " ON COORDINATOR" case "PER_HOST": executeStatement += " ON HOST" case "SEGMENT_ID": executeStatement += fmt.Sprintf(" ON SEGMENT %s", execType[1]) case "TOTAL_SEGS": executeStatement += fmt.Sprintf(" ON %s", execType[1]) } return executeStatement } /* * This function is adapted from dumputils.c * * Escape backslashes and apostrophes in EXTERNAL TABLE format strings. * Returns a list of unquoted keyword and escaped quoted string tokens * * The fmtopts field of a pg_exttable tuple has an odd encoding -- it is * partially parsed and contains "string" values that aren't legal SQL. * Each string value is delimited by apostrophes and is usually, but not * always, a single character. 
The fmtopts field is typically something * like {delimiter '\x09' null '\N' escape '\'} or * {delimiter ',' null '' escape '\' quote '''}. Each backslash and * apostrophe in a string must be escaped and each string must be * prepended with an 'E' denoting an "escape syntax" string. * * Usage note: A field value containing an apostrophe followed by a space * will throw this algorithm off -- it presumes no embedded spaces. */ func tokenizeAndEscapeFormatOpts(formatOpts string) []string { inString := false resultList := make([]string, 0) currString := "" for i := 0; i < len(formatOpts); i++ { switch formatOpts[i] { case '\'': if inString { /* * Escape apostrophes *within* the string. If the * apostrophe is at the end of the source string or is * followed by a space, it is presumed to be a closing * apostrophe and is not escaped. */ if (i+1) == len(formatOpts) || formatOpts[i+1] == ' ' { inString = false } else { currString += "\\" } } else { currString = "E" inString = true } case '\\': currString += "\\" case ' ': if !inString { resultList = append(resultList, currString) currString = "" continue } } currString += string(formatOpts[i]) } resultList = append(resultList, currString) return resultList } /* * Format options to use `a = b` format because this format is required * when using CUSTOM format. 
* * We do this for CUSTOM, AVRO and PARQUET, but not CSV or TEXT because * CSV and TEXT have some multi-word options that are difficult * to parse into this format */ func makeCustomFormatOpts(tokens []string) string { var key string var value string resultOpts := make([]string, 0) for i := 0; i < len(tokens)-1; i += 2 { key = tokens[i] value = tokens[i+1] opt := fmt.Sprintf(`%s = %s`, key, value) resultOpts = append(resultOpts, opt) } return strings.Join(resultOpts, ", ") } func GenerateFormatStatement(extTableDef ExternalTableDefinition) string { var formatStatement string formatType := "" switch extTableDef.FormatType { case "t": formatType = "TEXT" case "c": formatType = "CSV" case "b": formatType = "CUSTOM" case "a": formatType = "AVRO" case "p": formatType = "PARQUET" } formatStatement += fmt.Sprintf("FORMAT '%s'", formatType) if extTableDef.FormatOpts != "" { formatTokens := tokenizeAndEscapeFormatOpts(strings.TrimSpace(extTableDef.FormatOpts)) formatOptsString := "" if formatType == "TEXT" || formatType == "CSV" { formatOptsString = strings.Join(formatTokens, " ") } else { formatOptsString = makeCustomFormatOpts(formatTokens) } formatStatement += fmt.Sprintf(" (%s)", formatOptsString) } return formatStatement } func generateLogErrorStatement(extTableDef ExternalTableDefinition) string { logErrorStatement := "" if extTableDef.LogErrors { if extTableDef.LogErrPersist { logErrorStatement += "\nLOG ERRORS PERSISTENTLY" } else { logErrorStatement += "\nLOG ERRORS" } } else if extTableDef.ErrTableName != "" && extTableDef.ErrTableSchema != "" { errTableFQN := utils.MakeFQN(extTableDef.ErrTableSchema, extTableDef.ErrTableName) logErrorStatement += fmt.Sprintf("\nLOG ERRORS INTO %s", errTableFQN) } if extTableDef.RejectLimit != 0 { logErrorStatement += fmt.Sprintf("\nSEGMENT REJECT LIMIT %d ", extTableDef.RejectLimit) switch extTableDef.RejectLimitType { case "r": logErrorStatement += "ROWS" case "p": logErrorStatement += "PERCENT" } } return logErrorStatement } 
func PrintExternalTableStatements(metadataFile *utils.FileWithByteCount, tableName string, extTableDef ExternalTableDefinition) { if extTableDef.Type != WRITABLE_WEB { if len(extTableDef.URIs) > 0 { metadataFile.MustPrintf("LOCATION (\n\t'%s'\n)", strings.Join(extTableDef.URIs, "',\n\t'")) } } if extTableDef.Type == READABLE || (extTableDef.Type == WRITABLE_WEB && extTableDef.Protocol == S3) { if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } else if extTableDef.ExecLocation == "COORDINATOR_ONLY" { metadataFile.MustPrintf(" ON COORDINATOR") } } if extTableDef.Type == READABLE_WEB || extTableDef.Type == WRITABLE_WEB { if extTableDef.Command != "" { metadataFile.MustPrint(generateExecuteStatement(extTableDef)) } } metadataFile.MustPrintln() metadataFile.MustPrint(GenerateFormatStatement(extTableDef)) metadataFile.MustPrintln() metadataFile.MustPrintf("ENCODING '%s'", extTableDef.Encoding) if extTableDef.Type == READABLE || extTableDef.Type == READABLE_WEB { metadataFile.MustPrint(generateLogErrorStatement(extTableDef)) } } func PrintCreateExternalProtocolStatement(metadataFile *utils.FileWithByteCount, toc *toc.TOC, protocol ExternalProtocol, funcInfoMap map[uint32]FunctionInfo, protoMetadata ObjectMetadata) { start := metadataFile.ByteCount funcOidList := []uint32{protocol.ReadFunction, protocol.WriteFunction, protocol.Validator} hasUserDefinedFunc := false for _, funcOid := range funcOidList { if funcInfo, ok := funcInfoMap[funcOid]; ok && !funcInfo.IsInternal { hasUserDefinedFunc = true } } if hasUserDefinedFunc { protocolFunctions := make([]string, 0) if protocol.ReadFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("readfunc = %s", funcInfoMap[protocol.ReadFunction].QualifiedName)) } if protocol.WriteFunction != 0 { protocolFunctions = append(protocolFunctions, fmt.Sprintf("writefunc = %s", funcInfoMap[protocol.WriteFunction].QualifiedName)) } if protocol.Validator != 0 { protocolFunctions = 
append(protocolFunctions, fmt.Sprintf("validatorfunc = %s", funcInfoMap[protocol.Validator].QualifiedName)) } metadataFile.MustPrintf("\n\nCREATE ") if protocol.Trusted { metadataFile.MustPrintf("TRUSTED ") } metadataFile.MustPrintf("PROTOCOL %s (%s);\n", protocol.Name, strings.Join(protocolFunctions, ", ")) section, entry := protocol.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } PrintObjectMetadata(metadataFile, toc, protoMetadata, protocol, "") } func PrintExchangeExternalPartitionStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, extPartitions []PartitionInfo, partInfoMap map[uint32]PartitionInfo, tables []Table) { tableNameMap := make(map[uint32]string, len(tables)) for _, table := range tables { tableNameMap[table.Oid] = table.FQN() } for _, externalPartition := range extPartitions { extPartRelationName := tableNameMap[externalPartition.RelationOid] if extPartRelationName == "" { continue //Not included in the list of tables to back up } parentRelationName := utils.MakeFQN(externalPartition.ParentSchema, externalPartition.ParentRelationName) start := metadataFile.ByteCount alterPartitionStr := "" currentPartition := externalPartition for currentPartition.PartitionParentRuleOid != 0 { parent := partInfoMap[currentPartition.PartitionParentRuleOid] if parent.PartitionName == "" { alterPartitionStr = fmt.Sprintf("ALTER PARTITION FOR (RANK(%d)) ", parent.PartitionRank) + alterPartitionStr } else { alterPartitionStr = fmt.Sprintf("ALTER PARTITION %s ", parent.PartitionName) + alterPartitionStr } currentPartition = parent } metadataFile.MustPrintf("\n\nALTER TABLE %s %s", parentRelationName, alterPartitionStr) if externalPartition.PartitionName == "" { metadataFile.MustPrintf("EXCHANGE PARTITION FOR (RANK(%d)) ", externalPartition.PartitionRank) } else { metadataFile.MustPrintf("EXCHANGE PARTITION %s ", externalPartition.PartitionName) } metadataFile.MustPrintf("WITH TABLE %s WITHOUT VALIDATION;", 
extPartRelationName) metadataFile.MustPrintf("\n\nDROP TABLE %s;", extPartRelationName) section, entry := externalPartition.GetMetadataEntry() toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) } }
import ( "database/sql" "fmt" "strings"
random_line_split
dashboard.py
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # ails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. import config, defaults, htmllib, pprint, time from lib import * import wato # Python 2.3 does not have 'set' in normal namespace. # But it can be imported from 'sets' try: set() except NameError: from sets import Set as set loaded_with_language = False builtin_dashboards = {} # Declare constants to be used in the definitions of the dashboards GROW = 0 MAX = -1 # These settings might go into the config module, sometime in future, # in order to allow the user to customize this. 
header_height = 60 # Distance from top of the screen to the lower border of the heading screen_margin = 5 # Distance from the left border of the main-frame to the dashboard area dashlet_padding = 21, 5, 5, 0 # Margin (N, E, S, W) between outer border of dashlet and its content corner_overlap = 22 title_height = 0 # Height of dashlet title-box raster = 10, 10 # Raster the dashlet choords are measured in # Load plugins in web/plugins/dashboard and declare permissions, # note: these operations produce language-specific results and # thus must be reinitialized everytime a language-change has # been detected. def load_plugins(): global loaded_with_language if loaded_with_language == current_language: return # Permissions are currently not being defined. That will be the # case as soon as dashboards become editable. # Load plugins for dashboards. Currently these files # just may add custom dashboards by adding to builtin_dashboards. load_web_plugins("dashboard", globals()) # This must be set after plugin loading to make broken plugins raise # exceptions all the time and not only the first time (when the plugins # are loaded). loaded_with_language = current_language # In future there will be user editable dashboards just like # views which will be loaded. Currently we only use the builtin # dashboads. global dashboards dashboards = builtin_dashboards # HTML page handler for generating the (a) dashboard. The name # of the dashboard to render is given in the HTML variable 'name'. # This defaults to "main". def page_dashboard(): name = html.var("name", "main") if name not in dashboards: raise MKGeneralException("No such dashboard: '<b>%s</b>'" % name) render_dashboard(name) def add_wato_folder_to_url(url, wato_folder): if not wato_folder: return url elif '/' in url: return url # do not append wato_folder to non-Check_MK-urls elif '?' 
in url: return url + "&wato_folder=" + htmllib.urlencode(wato_folder) else: return url + "?wato_folder=" + htmllib.urlencode(wato_folder) # Actual rendering function def render_dashboard(name): board = dashboards[name] # The dashboard may be called with "wato_folder" set. In that case # the dashboard is assumed to restrict the shown data to a specific # WATO subfolder or file. This could be a configurable feature in # future, but currently we assume, that *all* dashboards are filename # sensitive. wato_folder = html.var("wato_folder") # When an empty wato_folder attribute is given a user really wants # to see only the hosts contained in the root folder. So don't ignore # the root folder anymore. #if not wato_folder: # ignore wato folder in case of root folder # wato_folder = None # The title of the dashboard needs to be prefixed with the WATO path, # in order to make it clear to the user, that he is seeing only partial # data. title = board["title"] global header_height if title is None: # If the title is none, hide the header line html.set_render_headfoot(False) header_height = 0 title = '' elif wato_folder is not None: title = wato.api.get_folder_title(wato_folder) + " - " + title stylesheets = ["pages", "dashboard", "status", "views"] stylesheets += ["theme/css/cloud-admin", "theme/css/themes/default", "theme/css/responsive", "theme/font-awesome/css/font-awesome.min", "theme/js/bootstrap-daterangepicker/daterangepicker-bs3", "theme/js/hubspot-messenger/css/messenger.min", "theme/js/hubspot-messenger/css/messenger-spinner.min", "theme/js/hubspot-messenger/css/messenger-theme-flat.min", "theme/js/jquery-ui-1.10.3.custom/css/custom-theme/jquery-ui-1.10.3.custom.min", "theme/js/bootstrap-switch/bootstrap-switch.min", "theme/css/flags/flags.min", "theme/css/fonts" ] javascripts = [] javascripts += ["dashboard"] html.header(title, javascripts=javascripts, stylesheets=stylesheets) html.write("""<div class="col-lg-12 margin-top-50" id="content">""") result = """ <!-- 
PAGE HEADER--> <div class="row"> <div class="col-sm-12"> <div class="page-header"> <!-- STYLER --> <!-- /STYLER --> <!-- BREADCRUMBS --> <ul class="breadcrumb"> <li> <i class="fa fa-home"></i> </li> <li>Dashboard</li> </ul> <!-- /BREADCRUMBS --> <div class="clearfix"> <h3 class="content-title pull-left">Dashboard</h3> </div> <div class="description">Blank Page</div> </div> </div> </div> <!-- /PAGE HEADER --> """ html.write(result) html.write(""" <div class="row"> <div class="col-sm-12"> """) # html.write("<div id=dashboard class=\"dashboard_%s\">\n" % name) # Container of all dashlets refresh_dashlets = [] # Dashlets with automatic refresh, for Javascript for nr, dashlet in enumerate(board["dashlets"]): # dashlets using the 'urlfunc' method will dynamically compute # an url (using HTML context variables at their wish). if "urlfunc" in dashlet: dashlet["url"] = dashlet["urlfunc"]() # dashlets using the 'url' method will be refreshed by us. Those # dashlets using static content (such as an iframe) will not be # refreshed by us but need to do that themselves. if "url" in dashlet: refresh_dashlets.append([nr, dashlet.get("refresh", 0), str(add_wato_folder_to_url(dashlet["url"], wato_folder))]) # Paint the dashlet's HTML code render_dashlet(nr, dashlet, wato_folder) html.write("</div>\n") html.write(""" </div> </div> """) # html.write("""</div>""") # Put list of all autorefresh-dashlets into Javascript and also make sure, # that the dashbaord is painted initially. The resize handler will make sure # that every time the user resizes the browser window the layout will be re-computed # and all dashlets resized to their new positions and sizes. 
html.javascript(""" //var header_height = %d; //var screen_margin = %d; //var title_height = %d; //var dashlet_padding = Array%s; //var corner_overlap = %d; var refresh_dashlets = %r; //var dashboard_name = '%s'; //set_dashboard_size(); //window.onresize = function () { set_dashboard_size(); } //window.onload = function () { set_dashboard_size(); } dashboard_scheduler(1); """ % (header_height, screen_margin, title_height, dashlet_padding, corner_overlap, refresh_dashlets, name)) html.body_end() # omit regular footer with status icons, etc. # Create the HTML code for one dashlet. Each dashlet has an id "dashlet_%d", # where %d is its index (in board["dashlets"]). Javascript uses that id # for the resizing. Within that div there is an inner div containing the # actual dashlet content. The margin between the inner and outer div is # used for stylish layout stuff (shadows, etc.) def render_dashlet(nr, dashlet, wato_folder): html.write(""" <div class="col-md-6"> """) # html.write('<div class=dashlet id="dashlet_%d">' % nr) # # render shadow # # if dashlet.get("shadow", True): # # for p in [ "nw", "ne", "sw", "se", "n", "s", "w", "e" ]: # # html.write('<img id="dashadow_%s_%d" class="shadow %s" src="images/dashadow-%s.png">' % # # (p, nr, p, p)) # if dashlet.get("title"): # url = dashlet.get("title_url", None) # if url: # title = '<a href="%s">%s</a>' % (url, dashlet["title"]) # else: # title = dashlet["title"] # html.write('<div class="title" id="dashlet_title_%d">%s</div>' % (nr, title)) # if dashlet.get("background", True): # bg = " background" # else: # bg = "" # html.write('<div class="dashlet_inner%s" id="dashlet_inner_%d">' % (bg, nr)) # html.write( "%s" % dashlet) # # Optional way to render a dynamic iframe URL if "iframefunc" in dashlet: dashlet["iframe"] = dashlet["iframefunc"]() # # The method "view" is a shortcut for "iframe" with a certain url if "view" in dashlet: dashlet["iframe"] = "view.py?view_name=%s&_display_options=HRSIXL&_body_class=dashlet" % 
dashlet["view"] html.write(""" <div class="box border"> <div class="box-title"> <h4><i class="fa fa-adjust"></i>%s</h4> <div class="tools hidden-xs"> <a class="config" data-toggle="modal" href="#box-config"> <i class="fa fa-cog"></i> </a> </div> </div> <div class="box-body"> <div class="chart" id="dashlet_%s" style="padding: 0px; position: relative;"> """ % (dashlet["title"], nr)) # # The content is rendered only if it is fixed. In the # # other cases the initial (re)-size will paint the content. if "content" in dashlet: # fixed content html.write(dashlet["content"]) elif "iframe" in dashlet: # fixed content containing iframe # Fix of iPad >:-P html.write('<div style="width: 100%; height: 100%; -webkit-overflow-scrolling:touch; overflow: auto;">') html.write('<iframe allowTransparency="true" frameborder="0" width="100%%" height="100%%" src="%s"></iframe>' % add_wato_folder_to_url(dashlet["iframe"], wato_folder)) html.write('</div>') html.write('<div class="dashlet_inner" id="dashlet_inner_%d">' % (nr)) html.write("</div>") html.write(""" </div> </div> </div> """ ) html.write("</div>\n") # Here comes the brain stuff: An intelligent liquid layout algorithm. # It is called via ajax, mainly because I was not eager to code this # directly in Javascript (though this would be possible and probably # more lean.) # Compute position and size of all dashlets def ajax_resize(): # computation with vectors class vec: def __init__(self, xy): self._data = xy def __div__(self, xy): return vec((self._data[0] / xy[0], self._data[1] / xy[1])) def __repr__(self): return repr(self._data) def __getitem__(self, i): return self._data[i] def make_absolute(self, size): n = [] for i in [0, 1]: if self._data[i] < 0: n.append(size[i] + self._data[i] + 1) # Here was a bug fixed by Markus Lengler else: n.append(self._data[i] - 1) # make begin from 0 return vec(n) # Compute the initial size of the dashlet. 
If MAX is used, # then the dashlet consumes all space in its growing direction, # regardless of any other dashlets. def initial_size(self, position, rastersize): n = [] for i in [0, 1]: if self._data[i] == MAX: n.append(rastersize[i] - abs(position[i]) + 1) elif self._data[i] == GROW: n.append(1) else: n.append(self._data[i]) return n def compute_grow_by(self, size): n = [] for i in [0, 1]: if size[i] != GROW: # absolute size, no growth n.append(0) elif self._data[i] < 0: n.append(-1) # grow direction left, up else: n.append(1) # grow direction right, down return n def __add__(self, b): return vec((self[0] + b[0], self[1] + b[1])) board = dashboards[html.var("name")] screensize = vec((int(html.var("width")), int(html.var("height")))) rastersize = screensize / raster used_matrix = {} # keep track of used raster elements # first place all dashlets at their absolute positions positions = [] for nr, dashlet in enumerate(board["dashlets"]): # Relative position is as noted in the declaration. 1,1 => top left origin, # -1,-1 => bottom right origin, 0 is not allowed here rel_position = vec(dashlet["position"]) # starting from 1, negative means: from right/bottom # Compute the absolute position, this time from 0 to rastersize-1 abs_position = rel_position.make_absolute(rastersize) # The size in raster-elements. A 0 for a dimension means growth. No negative values here. size = vec(dashlet["size"]) # Compute the minimum used size for the dashlet. For growth-dimensions we start with 1 used_size = size.initial_size(rel_position, rastersize) # Now compute the rectangle that is currently occupied. The choords # of bottomright are *not* included. if rel_position[0] > 0: left = abs_position[0] right = left + used_size[0] else: right = abs_position[0] left = right - used_size[0] if rel_position[1] > 0: top = abs_position[1] bottom = top + used_size[1] else:
# Allocate used squares in matrix. If not all squares we need are free, # then the dashboard is too small for all dashlets (as it seems). # TEST: Dashlet auf 0/0 setzen, wenn kein Platz dafür da ist. try: for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: raise Exception() used_matrix[(x,y)] = True # Helper variable for how to grow, both x and y in [-1, 0, 1] grow_by = rel_position.compute_grow_by(size) positions.append((nr, True, left, top, right, bottom, grow_by)) except: positions.append((nr, False, left, top, right, bottom, (0,0))) # now resize all elastic dashlets to the max, but only # by one raster at a time, in order to be fair def try_resize(x, y, width, height): return False if x + width >= xmax or y + height >= ymax: return False for xx in range(x, x + width): for yy in range(y, y + height): if used_matrix[xx][yy]: return False for xx in range(x, x + width): for yy in range(y, y + height): used_matrix[xx][yy] = True return True # Das hier ist FALSCH! In Wirklichkeit muss ich nur prüfen, # ob der *Zuwachs* nicht in der Matrix belegt ist. Das jetzige # Rechteck muss ich ausklammern. Es ist ja schon belegt. 
def try_allocate(left, top, right, bottom): # Try if all needed squares are free for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: return False # Allocate all needed squares for x in range(left, right): for y in range(top, bottom): used_matrix[(x,y)] = True return True # Now try to expand all elastic rectangles as far as possible at_least_one_expanded = True while at_least_one_expanded: at_least_one_expanded = False new_positions = [] for (nr, visible, left, top, right, bottom, grow_by) in positions: if visible: # html.write(repr((nr, left, top, right, bottom, grow_by))) # try to grow in X direction by one if grow_by[0] > 0 and right < rastersize[0] and try_allocate(right, top, right+1, bottom): at_least_one_expanded = True right += 1 elif grow_by[0] < 0 and left > 0 and try_allocate(left-1, top, left, bottom): at_least_one_expanded = True left -= 1 # try to grow in Y direction by one if grow_by[1] > 0 and bottom < rastersize[1] and try_allocate(left, bottom, right, bottom+1): at_least_one_expanded = True bottom += 1 elif grow_by[1] < 0 and top > 0 and try_allocate(left, top-1, right, top): at_least_one_expanded = True top -= 1 new_positions.append((nr, visible, left, top, right, bottom, grow_by)) positions = new_positions resize_info = [] for nr, visible, left, top, right, bottom, grow_by in positions: # html.write(repr((nr, left, top, right, bottom, grow_by))) # html.write("<br>") title = board["dashlets"][nr].get("title") if title: th = title_height else: th = 0 resize_info.append([nr, visible and 1 or 0, left * raster[0], top * raster[1] + th, (right - left) * raster[0], (bottom - top) * raster[1] - th]) html.write(repr(resize_info)) def dashlet_overview(): html.write( '<table class=dashlet_overview>' '<tr><td valign=top>' '<a href="http://mathias-kettner.de/check_mk.html"><img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>' '</td>' '<td><h2>Check_MK Multisite</h2>' 'Welcome to Check_MK Multisite. 
If you want to learn more about Multsite, please visit ' 'our <a href="http://mathias-kettner.de/checkmk_multisite.html">online documentation</a>. ' 'Multisite is part of <a href="http://mathias-kettner.de/check_mk.html">Check_MK</a> - an Open Source ' 'project by <a href="http://mathias-kettner.de">Mathias Kettner</a>.' '</td>' ) html.write('</tr></table>') def dashlet_mk_logo(): html.write('<a href="http://mathias-kettner.de/check_mk.html">' '<img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>') def dashlet_hoststats(): table = [ ( _("Up"), "#0b3", "searchhost&is_host_scheduled_downtime_depth=0&hst0=on", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Down"), "#f00", "searchhost&is_host_scheduled_downtime_depth=0&hst1=on", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Unreachable"), "#f80", "searchhost&is_host_scheduled_downtime_depth=0&hst2=on", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("In Downtime"), "#0af", "searchhost&search=1&is_host_scheduled_downtime_depth=1", "Stats: scheduled_downtime_depth > 0\n" \ ) ] filter = "Filter: custom_variable_names < _REALNAME\n" render_statistics("hoststats", "hosts", table, filter, "Host Statistics") def dashlet_servicestats(): table = [ ( _("OK"), "#0b3", "searchsvc&hst0=on&st0=on&is_in_downtime=0", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("In Downtime"), "#0af", "searchsvc&is_in_downtime=1", "Stats: scheduled_downtime_depth > 0\n" \ "Stats: host_scheduled_downtime_depth > 0\n" \ "StatsOr: 2\n"), ( _("On Down host"), "#048", "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0", "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state != 0\n" \ "StatsAnd: 3\n"), ( 
_("Warning"), "#ff0", "searchsvc&hst0=on&st1=on&is_in_downtime=0", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Unknown"), "#f80", "searchsvc&hst0=on&st3=on&is_in_downtime=0", "Stats: state = 3\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Critical"), "#f00", "searchsvc&hst0=on&st2=on&is_in_downtime=0", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ] filter = "Filter: host_custom_variable_names < _REALNAME\n" render_statistics("servicestats", "services", table, filter, "Service Statistics") def render_statistics(pie_id, what, table, filter, title=None): html.write("<div class=stats>") pie_diameter = 130 pie_left_aspect = 0.5 pie_right_aspect = 0.8 # Is the query restricted to a certain WATO-path? wato_folder = html.var("wato_folder") if wato_folder: # filter += "Filter: host_state = 0" filter += "Filter: host_filename ~ ^/wato/%s/\n" % wato_folder.replace("\n", "") # Is the query restricted to a host contact group? host_contact_group = html.var("host_contact_group") if host_contact_group: filter += "Filter: host_contact_groups >= %s\n" % host_contact_group.replace("\n", "") # Is the query restricted to a service contact group? 
service_contact_group = html.var("service_contact_group") if service_contact_group: filter += "Filter: service_contact_groups >= %s\n" % service_contact_group.replace("\n", "") query = "GET %s\n" % what for entry in table: query += entry[3] query += filter result = html.live.query_summed_stats(query) pies = zip(table, result) total = sum([x[1] for x in pies]) html.write('<div class=pie width=%d height=%d id="%s_stats" style="float: left"></div>' % (pie_diameter, pie_diameter, pie_id)) # html.write('<img src="images/globe.png" class="globe">') # html.write('<table class="hoststats%s" style="float:left">' % ( # len(pies) > 1 and " narrow" or "")) # table_entries = pies # while len(table_entries) < 6: # table_entries = table_entries + [ (("", "#95BBCD", "", ""), "&nbsp;") ] # table_entries.append(((_("Total"), "", "all%s" % what, ""), total)) # for (name, color, viewurl, query), count in table_entries: # url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" \ # + htmllib.urlencode(html.var("wato_folder", "")) # if host_contact_group: # url += '&opthost_contactgroup=' + host_contact_group # if service_contact_group: # url += '&optservice_contactgroup=' + service_contact_group # html.write('<tr><th><a href="%s">%s</a></th>' % (url, name)) # style = '' # if color: # style = ' style="background-color: %s"' % color # html.write('<td class=color%s>' # '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count)) # html.write("</table>") data = [] for pie in pies: typeof = pie[0][0] count = pie[1] data.append([ typeof, count]) html.write("</div>") html.javascript(""" draw_hchart(id="%s", type="pie" , title="%s", name="%s", data=%s); """ % (pie_id, title, title, data)) def dashlet_pnpgraph(): render_pnpgraph( html.var("site"), html.var("host"), html.var("service"), int(html.var("source", 0)), int(html.var("view", 0)), ) def dashlet_nodata(): html.write("<div class=nograph><div class=msg>") html.write(html.var("message", _("No data available."))) 
html.write("</div></div>") def render_pnpgraph(site, host, service = None, source = 0, view = 0): if not host: html.message("Invalid URL to this dashlet. Missing <tt>host</tt>") return; if not service: service = "_HOST_" if not site: base_url = defaults.url_prefix else: base_url = html.site_status[site]["site"]["url_prefix"] base_url += "pnp4nagios/index.php/" var_part = "?host=%s&srv=%s&view=0&source=%d&view=%d&theme=multisite&_t=%d" % \ (pnp_cleanup(host), pnp_cleanup(service), source, view, int(time.time())) pnp_url = base_url + "graph" + var_part img_url = base_url + "image" + var_part html.write('<a href="%s"><img border=0 src="%s"></a>' % (pnp_url, img_url)) # load_plugins()
bottom = abs_position[1] top = bottom - used_size[1]
conditional_block
dashboard.py
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # ails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. import config, defaults, htmllib, pprint, time from lib import * import wato # Python 2.3 does not have 'set' in normal namespace. # But it can be imported from 'sets' try: set() except NameError: from sets import Set as set loaded_with_language = False builtin_dashboards = {} # Declare constants to be used in the definitions of the dashboards GROW = 0 MAX = -1 # These settings might go into the config module, sometime in future, # in order to allow the user to customize this. 
header_height = 60 # Distance from top of the screen to the lower border of the heading screen_margin = 5 # Distance from the left border of the main-frame to the dashboard area dashlet_padding = 21, 5, 5, 0 # Margin (N, E, S, W) between outer border of dashlet and its content corner_overlap = 22 title_height = 0 # Height of dashlet title-box raster = 10, 10 # Raster the dashlet choords are measured in # Load plugins in web/plugins/dashboard and declare permissions, # note: these operations produce language-specific results and # thus must be reinitialized everytime a language-change has # been detected. def load_plugins(): global loaded_with_language if loaded_with_language == current_language: return # Permissions are currently not being defined. That will be the # case as soon as dashboards become editable. # Load plugins for dashboards. Currently these files # just may add custom dashboards by adding to builtin_dashboards. load_web_plugins("dashboard", globals()) # This must be set after plugin loading to make broken plugins raise # exceptions all the time and not only the first time (when the plugins # are loaded). loaded_with_language = current_language # In future there will be user editable dashboards just like # views which will be loaded. Currently we only use the builtin # dashboads. global dashboards dashboards = builtin_dashboards # HTML page handler for generating the (a) dashboard. The name # of the dashboard to render is given in the HTML variable 'name'. # This defaults to "main". def page_dashboard(): name = html.var("name", "main") if name not in dashboards: raise MKGeneralException("No such dashboard: '<b>%s</b>'" % name) render_dashboard(name) def add_wato_folder_to_url(url, wato_folder): if not wato_folder: return url elif '/' in url: return url # do not append wato_folder to non-Check_MK-urls elif '?' 
in url: return url + "&wato_folder=" + htmllib.urlencode(wato_folder) else: return url + "?wato_folder=" + htmllib.urlencode(wato_folder) # Actual rendering function def render_dashboard(name): board = dashboards[name] # The dashboard may be called with "wato_folder" set. In that case # the dashboard is assumed to restrict the shown data to a specific # WATO subfolder or file. This could be a configurable feature in # future, but currently we assume, that *all* dashboards are filename # sensitive. wato_folder = html.var("wato_folder") # When an empty wato_folder attribute is given a user really wants # to see only the hosts contained in the root folder. So don't ignore # the root folder anymore. #if not wato_folder: # ignore wato folder in case of root folder # wato_folder = None # The title of the dashboard needs to be prefixed with the WATO path, # in order to make it clear to the user, that he is seeing only partial # data. title = board["title"] global header_height if title is None: # If the title is none, hide the header line html.set_render_headfoot(False) header_height = 0 title = '' elif wato_folder is not None: title = wato.api.get_folder_title(wato_folder) + " - " + title stylesheets = ["pages", "dashboard", "status", "views"] stylesheets += ["theme/css/cloud-admin", "theme/css/themes/default", "theme/css/responsive", "theme/font-awesome/css/font-awesome.min", "theme/js/bootstrap-daterangepicker/daterangepicker-bs3", "theme/js/hubspot-messenger/css/messenger.min", "theme/js/hubspot-messenger/css/messenger-spinner.min", "theme/js/hubspot-messenger/css/messenger-theme-flat.min", "theme/js/jquery-ui-1.10.3.custom/css/custom-theme/jquery-ui-1.10.3.custom.min", "theme/js/bootstrap-switch/bootstrap-switch.min", "theme/css/flags/flags.min", "theme/css/fonts" ] javascripts = [] javascripts += ["dashboard"] html.header(title, javascripts=javascripts, stylesheets=stylesheets) html.write("""<div class="col-lg-12 margin-top-50" id="content">""") result = """ <!-- 
PAGE HEADER--> <div class="row"> <div class="col-sm-12"> <div class="page-header"> <!-- STYLER --> <!-- /STYLER --> <!-- BREADCRUMBS --> <ul class="breadcrumb"> <li> <i class="fa fa-home"></i> </li> <li>Dashboard</li> </ul> <!-- /BREADCRUMBS --> <div class="clearfix"> <h3 class="content-title pull-left">Dashboard</h3> </div> <div class="description">Blank Page</div> </div> </div> </div> <!-- /PAGE HEADER --> """ html.write(result) html.write(""" <div class="row"> <div class="col-sm-12"> """) # html.write("<div id=dashboard class=\"dashboard_%s\">\n" % name) # Container of all dashlets refresh_dashlets = [] # Dashlets with automatic refresh, for Javascript for nr, dashlet in enumerate(board["dashlets"]): # dashlets using the 'urlfunc' method will dynamically compute # an url (using HTML context variables at their wish). if "urlfunc" in dashlet: dashlet["url"] = dashlet["urlfunc"]() # dashlets using the 'url' method will be refreshed by us. Those # dashlets using static content (such as an iframe) will not be # refreshed by us but need to do that themselves. if "url" in dashlet: refresh_dashlets.append([nr, dashlet.get("refresh", 0), str(add_wato_folder_to_url(dashlet["url"], wato_folder))]) # Paint the dashlet's HTML code render_dashlet(nr, dashlet, wato_folder) html.write("</div>\n") html.write(""" </div> </div> """) # html.write("""</div>""") # Put list of all autorefresh-dashlets into Javascript and also make sure, # that the dashbaord is painted initially. The resize handler will make sure # that every time the user resizes the browser window the layout will be re-computed # and all dashlets resized to their new positions and sizes. 
html.javascript(""" //var header_height = %d; //var screen_margin = %d; //var title_height = %d; //var dashlet_padding = Array%s; //var corner_overlap = %d; var refresh_dashlets = %r; //var dashboard_name = '%s'; //set_dashboard_size(); //window.onresize = function () { set_dashboard_size(); } //window.onload = function () { set_dashboard_size(); } dashboard_scheduler(1); """ % (header_height, screen_margin, title_height, dashlet_padding, corner_overlap, refresh_dashlets, name)) html.body_end() # omit regular footer with status icons, etc. # Create the HTML code for one dashlet. Each dashlet has an id "dashlet_%d", # where %d is its index (in board["dashlets"]). Javascript uses that id # for the resizing. Within that div there is an inner div containing the # actual dashlet content. The margin between the inner and outer div is # used for stylish layout stuff (shadows, etc.) def render_dashlet(nr, dashlet, wato_folder): html.write(""" <div class="col-md-6"> """) # html.write('<div class=dashlet id="dashlet_%d">' % nr) # # render shadow # # if dashlet.get("shadow", True): # # for p in [ "nw", "ne", "sw", "se", "n", "s", "w", "e" ]: # # html.write('<img id="dashadow_%s_%d" class="shadow %s" src="images/dashadow-%s.png">' % # # (p, nr, p, p)) # if dashlet.get("title"): # url = dashlet.get("title_url", None) # if url: # title = '<a href="%s">%s</a>' % (url, dashlet["title"]) # else: # title = dashlet["title"] # html.write('<div class="title" id="dashlet_title_%d">%s</div>' % (nr, title)) # if dashlet.get("background", True): # bg = " background" # else: # bg = "" # html.write('<div class="dashlet_inner%s" id="dashlet_inner_%d">' % (bg, nr)) # html.write( "%s" % dashlet) # # Optional way to render a dynamic iframe URL if "iframefunc" in dashlet: dashlet["iframe"] = dashlet["iframefunc"]() # # The method "view" is a shortcut for "iframe" with a certain url if "view" in dashlet: dashlet["iframe"] = "view.py?view_name=%s&_display_options=HRSIXL&_body_class=dashlet" % 
dashlet["view"] html.write(""" <div class="box border"> <div class="box-title"> <h4><i class="fa fa-adjust"></i>%s</h4> <div class="tools hidden-xs"> <a class="config" data-toggle="modal" href="#box-config"> <i class="fa fa-cog"></i> </a> </div> </div> <div class="box-body"> <div class="chart" id="dashlet_%s" style="padding: 0px; position: relative;"> """ % (dashlet["title"], nr)) # # The content is rendered only if it is fixed. In the # # other cases the initial (re)-size will paint the content. if "content" in dashlet: # fixed content html.write(dashlet["content"]) elif "iframe" in dashlet: # fixed content containing iframe # Fix of iPad >:-P html.write('<div style="width: 100%; height: 100%; -webkit-overflow-scrolling:touch; overflow: auto;">') html.write('<iframe allowTransparency="true" frameborder="0" width="100%%" height="100%%" src="%s"></iframe>' % add_wato_folder_to_url(dashlet["iframe"], wato_folder)) html.write('</div>') html.write('<div class="dashlet_inner" id="dashlet_inner_%d">' % (nr)) html.write("</div>") html.write(""" </div> </div> </div> """ ) html.write("</div>\n") # Here comes the brain stuff: An intelligent liquid layout algorithm. # It is called via ajax, mainly because I was not eager to code this # directly in Javascript (though this would be possible and probably # more lean.) # Compute position and size of all dashlets def ajax_resize(): # computation with vectors class vec: def __init__(self, xy): self._data = xy def __div__(self, xy): return vec((self._data[0] / xy[0], self._data[1] / xy[1])) def __repr__(self): return repr(self._data) def __getitem__(self, i): return self._data[i] def make_absolute(self, size): n = [] for i in [0, 1]: if self._data[i] < 0: n.append(size[i] + self._data[i] + 1) # Here was a bug fixed by Markus Lengler else: n.append(self._data[i] - 1) # make begin from 0 return vec(n) # Compute the initial size of the dashlet. 
If MAX is used, # then the dashlet consumes all space in its growing direction, # regardless of any other dashlets. def initial_size(self, position, rastersize): n = [] for i in [0, 1]: if self._data[i] == MAX: n.append(rastersize[i] - abs(position[i]) + 1) elif self._data[i] == GROW: n.append(1) else: n.append(self._data[i]) return n def compute_grow_by(self, size): n = [] for i in [0, 1]: if size[i] != GROW: # absolute size, no growth n.append(0) elif self._data[i] < 0: n.append(-1) # grow direction left, up else: n.append(1) # grow direction right, down return n def __add__(self, b): return vec((self[0] + b[0], self[1] + b[1])) board = dashboards[html.var("name")] screensize = vec((int(html.var("width")), int(html.var("height")))) rastersize = screensize / raster used_matrix = {} # keep track of used raster elements # first place all dashlets at their absolute positions positions = [] for nr, dashlet in enumerate(board["dashlets"]): # Relative position is as noted in the declaration. 1,1 => top left origin, # -1,-1 => bottom right origin, 0 is not allowed here rel_position = vec(dashlet["position"]) # starting from 1, negative means: from right/bottom # Compute the absolute position, this time from 0 to rastersize-1 abs_position = rel_position.make_absolute(rastersize) # The size in raster-elements. A 0 for a dimension means growth. No negative values here. size = vec(dashlet["size"]) # Compute the minimum used size for the dashlet. For growth-dimensions we start with 1 used_size = size.initial_size(rel_position, rastersize) # Now compute the rectangle that is currently occupied. The choords # of bottomright are *not* included. if rel_position[0] > 0: left = abs_position[0] right = left + used_size[0] else: right = abs_position[0] left = right - used_size[0] if rel_position[1] > 0: top = abs_position[1] bottom = top + used_size[1] else: bottom = abs_position[1] top = bottom - used_size[1] # Allocate used squares in matrix. 
If not all squares we need are free, # then the dashboard is too small for all dashlets (as it seems). # TEST: Dashlet auf 0/0 setzen, wenn kein Platz dafür da ist. try: for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: raise Exception() used_matrix[(x,y)] = True # Helper variable for how to grow, both x and y in [-1, 0, 1] grow_by = rel_position.compute_grow_by(size) positions.append((nr, True, left, top, right, bottom, grow_by)) except: positions.append((nr, False, left, top, right, bottom, (0,0))) # now resize all elastic dashlets to the max, but only # by one raster at a time, in order to be fair def try_resize(x, y, width, height): return False if x + width >= xmax or y + height >= ymax: return False for xx in range(x, x + width): for yy in range(y, y + height): if used_matrix[xx][yy]: return False for xx in range(x, x + width): for yy in range(y, y + height): used_matrix[xx][yy] = True return True # Das hier ist FALSCH! In Wirklichkeit muss ich nur prüfen, # ob der *Zuwachs* nicht in der Matrix belegt ist. Das jetzige # Rechteck muss ich ausklammern. Es ist ja schon belegt. 
def try_allocate(left, top, right, bottom): # Try if all needed squares are free for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: return False # Allocate all needed squares for x in range(left, right): for y in range(top, bottom): used_matrix[(x,y)] = True return True # Now try to expand all elastic rectangles as far as possible at_least_one_expanded = True while at_least_one_expanded: at_least_one_expanded = False new_positions = [] for (nr, visible, left, top, right, bottom, grow_by) in positions: if visible: # html.write(repr((nr, left, top, right, bottom, grow_by))) # try to grow in X direction by one if grow_by[0] > 0 and right < rastersize[0] and try_allocate(right, top, right+1, bottom): at_least_one_expanded = True right += 1 elif grow_by[0] < 0 and left > 0 and try_allocate(left-1, top, left, bottom): at_least_one_expanded = True left -= 1 # try to grow in Y direction by one if grow_by[1] > 0 and bottom < rastersize[1] and try_allocate(left, bottom, right, bottom+1): at_least_one_expanded = True bottom += 1 elif grow_by[1] < 0 and top > 0 and try_allocate(left, top-1, right, top): at_least_one_expanded = True top -= 1 new_positions.append((nr, visible, left, top, right, bottom, grow_by)) positions = new_positions resize_info = [] for nr, visible, left, top, right, bottom, grow_by in positions: # html.write(repr((nr, left, top, right, bottom, grow_by))) # html.write("<br>") title = board["dashlets"][nr].get("title") if title: th = title_height else: th = 0 resize_info.append([nr, visible and 1 or 0, left * raster[0], top * raster[1] + th, (right - left) * raster[0], (bottom - top) * raster[1] - th]) html.write(repr(resize_info)) def da
: html.write( '<table class=dashlet_overview>' '<tr><td valign=top>' '<a href="http://mathias-kettner.de/check_mk.html"><img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>' '</td>' '<td><h2>Check_MK Multisite</h2>' 'Welcome to Check_MK Multisite. If you want to learn more about Multsite, please visit ' 'our <a href="http://mathias-kettner.de/checkmk_multisite.html">online documentation</a>. ' 'Multisite is part of <a href="http://mathias-kettner.de/check_mk.html">Check_MK</a> - an Open Source ' 'project by <a href="http://mathias-kettner.de">Mathias Kettner</a>.' '</td>' ) html.write('</tr></table>') def dashlet_mk_logo(): html.write('<a href="http://mathias-kettner.de/check_mk.html">' '<img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>') def dashlet_hoststats(): table = [ ( _("Up"), "#0b3", "searchhost&is_host_scheduled_downtime_depth=0&hst0=on", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Down"), "#f00", "searchhost&is_host_scheduled_downtime_depth=0&hst1=on", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Unreachable"), "#f80", "searchhost&is_host_scheduled_downtime_depth=0&hst2=on", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("In Downtime"), "#0af", "searchhost&search=1&is_host_scheduled_downtime_depth=1", "Stats: scheduled_downtime_depth > 0\n" \ ) ] filter = "Filter: custom_variable_names < _REALNAME\n" render_statistics("hoststats", "hosts", table, filter, "Host Statistics") def dashlet_servicestats(): table = [ ( _("OK"), "#0b3", "searchsvc&hst0=on&st0=on&is_in_downtime=0", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("In Downtime"), "#0af", "searchsvc&is_in_downtime=1", "Stats: scheduled_downtime_depth > 0\n" \ "Stats: 
host_scheduled_downtime_depth > 0\n" \ "StatsOr: 2\n"), ( _("On Down host"), "#048", "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0", "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state != 0\n" \ "StatsAnd: 3\n"), ( _("Warning"), "#ff0", "searchsvc&hst0=on&st1=on&is_in_downtime=0", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Unknown"), "#f80", "searchsvc&hst0=on&st3=on&is_in_downtime=0", "Stats: state = 3\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Critical"), "#f00", "searchsvc&hst0=on&st2=on&is_in_downtime=0", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ] filter = "Filter: host_custom_variable_names < _REALNAME\n" render_statistics("servicestats", "services", table, filter, "Service Statistics") def render_statistics(pie_id, what, table, filter, title=None): html.write("<div class=stats>") pie_diameter = 130 pie_left_aspect = 0.5 pie_right_aspect = 0.8 # Is the query restricted to a certain WATO-path? wato_folder = html.var("wato_folder") if wato_folder: # filter += "Filter: host_state = 0" filter += "Filter: host_filename ~ ^/wato/%s/\n" % wato_folder.replace("\n", "") # Is the query restricted to a host contact group? host_contact_group = html.var("host_contact_group") if host_contact_group: filter += "Filter: host_contact_groups >= %s\n" % host_contact_group.replace("\n", "") # Is the query restricted to a service contact group? 
service_contact_group = html.var("service_contact_group") if service_contact_group: filter += "Filter: service_contact_groups >= %s\n" % service_contact_group.replace("\n", "") query = "GET %s\n" % what for entry in table: query += entry[3] query += filter result = html.live.query_summed_stats(query) pies = zip(table, result) total = sum([x[1] for x in pies]) html.write('<div class=pie width=%d height=%d id="%s_stats" style="float: left"></div>' % (pie_diameter, pie_diameter, pie_id)) # html.write('<img src="images/globe.png" class="globe">') # html.write('<table class="hoststats%s" style="float:left">' % ( # len(pies) > 1 and " narrow" or "")) # table_entries = pies # while len(table_entries) < 6: # table_entries = table_entries + [ (("", "#95BBCD", "", ""), "&nbsp;") ] # table_entries.append(((_("Total"), "", "all%s" % what, ""), total)) # for (name, color, viewurl, query), count in table_entries: # url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" \ # + htmllib.urlencode(html.var("wato_folder", "")) # if host_contact_group: # url += '&opthost_contactgroup=' + host_contact_group # if service_contact_group: # url += '&optservice_contactgroup=' + service_contact_group # html.write('<tr><th><a href="%s">%s</a></th>' % (url, name)) # style = '' # if color: # style = ' style="background-color: %s"' % color # html.write('<td class=color%s>' # '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count)) # html.write("</table>") data = [] for pie in pies: typeof = pie[0][0] count = pie[1] data.append([ typeof, count]) html.write("</div>") html.javascript(""" draw_hchart(id="%s", type="pie" , title="%s", name="%s", data=%s); """ % (pie_id, title, title, data)) def dashlet_pnpgraph(): render_pnpgraph( html.var("site"), html.var("host"), html.var("service"), int(html.var("source", 0)), int(html.var("view", 0)), ) def dashlet_nodata(): html.write("<div class=nograph><div class=msg>") html.write(html.var("message", _("No data available."))) 
html.write("</div></div>") def render_pnpgraph(site, host, service = None, source = 0, view = 0): if not host: html.message("Invalid URL to this dashlet. Missing <tt>host</tt>") return; if not service: service = "_HOST_" if not site: base_url = defaults.url_prefix else: base_url = html.site_status[site]["site"]["url_prefix"] base_url += "pnp4nagios/index.php/" var_part = "?host=%s&srv=%s&view=0&source=%d&view=%d&theme=multisite&_t=%d" % \ (pnp_cleanup(host), pnp_cleanup(service), source, view, int(time.time())) pnp_url = base_url + "graph" + var_part img_url = base_url + "image" + var_part html.write('<a href="%s"><img border=0 src="%s"></a>' % (pnp_url, img_url)) # load_plugins()
shlet_overview()
identifier_name
dashboard.py
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # ails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. import config, defaults, htmllib, pprint, time from lib import * import wato # Python 2.3 does not have 'set' in normal namespace. # But it can be imported from 'sets' try: set() except NameError: from sets import Set as set loaded_with_language = False builtin_dashboards = {} # Declare constants to be used in the definitions of the dashboards GROW = 0 MAX = -1 # These settings might go into the config module, sometime in future, # in order to allow the user to customize this. 
header_height = 60 # Distance from top of the screen to the lower border of the heading screen_margin = 5 # Distance from the left border of the main-frame to the dashboard area dashlet_padding = 21, 5, 5, 0 # Margin (N, E, S, W) between outer border of dashlet and its content corner_overlap = 22 title_height = 0 # Height of dashlet title-box raster = 10, 10 # Raster the dashlet choords are measured in # Load plugins in web/plugins/dashboard and declare permissions, # note: these operations produce language-specific results and # thus must be reinitialized everytime a language-change has # been detected. def load_plugins(): global loaded_with_language if loaded_with_language == current_language: return # Permissions are currently not being defined. That will be the # case as soon as dashboards become editable. # Load plugins for dashboards. Currently these files # just may add custom dashboards by adding to builtin_dashboards. load_web_plugins("dashboard", globals()) # This must be set after plugin loading to make broken plugins raise # exceptions all the time and not only the first time (when the plugins # are loaded). loaded_with_language = current_language # In future there will be user editable dashboards just like # views which will be loaded. Currently we only use the builtin # dashboads. global dashboards dashboards = builtin_dashboards # HTML page handler for generating the (a) dashboard. The name # of the dashboard to render is given in the HTML variable 'name'. # This defaults to "main". def page_dashboard(): name = html.var("name", "main") if name not in dashboards: raise MKGeneralException("No such dashboard: '<b>%s</b>'" % name) render_dashboard(name) def add_wato_folder_to_url(url, wato_folder): if not wato_folder: return url elif '/' in url: return url # do not append wato_folder to non-Check_MK-urls elif '?' 
in url: return url + "&wato_folder=" + htmllib.urlencode(wato_folder) else: return url + "?wato_folder=" + htmllib.urlencode(wato_folder) # Actual rendering function def render_dashboard(name): board = dashboards[name] # The dashboard may be called with "wato_folder" set. In that case # the dashboard is assumed to restrict the shown data to a specific # WATO subfolder or file. This could be a configurable feature in # future, but currently we assume, that *all* dashboards are filename # sensitive. wato_folder = html.var("wato_folder") # When an empty wato_folder attribute is given a user really wants # to see only the hosts contained in the root folder. So don't ignore # the root folder anymore. #if not wato_folder: # ignore wato folder in case of root folder # wato_folder = None # The title of the dashboard needs to be prefixed with the WATO path, # in order to make it clear to the user, that he is seeing only partial # data. title = board["title"] global header_height if title is None: # If the title is none, hide the header line html.set_render_headfoot(False) header_height = 0 title = '' elif wato_folder is not None: title = wato.api.get_folder_title(wato_folder) + " - " + title stylesheets = ["pages", "dashboard", "status", "views"] stylesheets += ["theme/css/cloud-admin", "theme/css/themes/default", "theme/css/responsive", "theme/font-awesome/css/font-awesome.min", "theme/js/bootstrap-daterangepicker/daterangepicker-bs3", "theme/js/hubspot-messenger/css/messenger.min", "theme/js/hubspot-messenger/css/messenger-spinner.min", "theme/js/hubspot-messenger/css/messenger-theme-flat.min", "theme/js/jquery-ui-1.10.3.custom/css/custom-theme/jquery-ui-1.10.3.custom.min", "theme/js/bootstrap-switch/bootstrap-switch.min", "theme/css/flags/flags.min", "theme/css/fonts" ] javascripts = [] javascripts += ["dashboard"] html.header(title, javascripts=javascripts, stylesheets=stylesheets) html.write("""<div class="col-lg-12 margin-top-50" id="content">""") result = """ <!-- 
PAGE HEADER--> <div class="row"> <div class="col-sm-12"> <div class="page-header"> <!-- STYLER --> <!-- /STYLER --> <!-- BREADCRUMBS --> <ul class="breadcrumb"> <li> <i class="fa fa-home"></i> </li> <li>Dashboard</li> </ul> <!-- /BREADCRUMBS --> <div class="clearfix"> <h3 class="content-title pull-left">Dashboard</h3> </div> <div class="description">Blank Page</div> </div> </div> </div> <!-- /PAGE HEADER --> """ html.write(result) html.write(""" <div class="row"> <div class="col-sm-12"> """) # html.write("<div id=dashboard class=\"dashboard_%s\">\n" % name) # Container of all dashlets refresh_dashlets = [] # Dashlets with automatic refresh, for Javascript for nr, dashlet in enumerate(board["dashlets"]): # dashlets using the 'urlfunc' method will dynamically compute # an url (using HTML context variables at their wish). if "urlfunc" in dashlet: dashlet["url"] = dashlet["urlfunc"]() # dashlets using the 'url' method will be refreshed by us. Those # dashlets using static content (such as an iframe) will not be # refreshed by us but need to do that themselves. if "url" in dashlet: refresh_dashlets.append([nr, dashlet.get("refresh", 0), str(add_wato_folder_to_url(dashlet["url"], wato_folder))]) # Paint the dashlet's HTML code render_dashlet(nr, dashlet, wato_folder) html.write("</div>\n") html.write(""" </div> </div> """) # html.write("""</div>""") # Put list of all autorefresh-dashlets into Javascript and also make sure, # that the dashbaord is painted initially. The resize handler will make sure # that every time the user resizes the browser window the layout will be re-computed # and all dashlets resized to their new positions and sizes. 
html.javascript(""" //var header_height = %d; //var screen_margin = %d; //var title_height = %d; //var dashlet_padding = Array%s; //var corner_overlap = %d; var refresh_dashlets = %r; //var dashboard_name = '%s'; //set_dashboard_size(); //window.onresize = function () { set_dashboard_size(); } //window.onload = function () { set_dashboard_size(); } dashboard_scheduler(1); """ % (header_height, screen_margin, title_height, dashlet_padding, corner_overlap, refresh_dashlets, name)) html.body_end() # omit regular footer with status icons, etc. # Create the HTML code for one dashlet. Each dashlet has an id "dashlet_%d", # where %d is its index (in board["dashlets"]). Javascript uses that id # for the resizing. Within that div there is an inner div containing the # actual dashlet content. The margin between the inner and outer div is # used for stylish layout stuff (shadows, etc.) def render_dashlet(nr, dashlet, wato_folder): html.write(""" <div class="col-md-6"> """) # html.write('<div class=dashlet id="dashlet_%d">' % nr) # # render shadow # # if dashlet.get("shadow", True): # # for p in [ "nw", "ne", "sw", "se", "n", "s", "w", "e" ]: # # html.write('<img id="dashadow_%s_%d" class="shadow %s" src="images/dashadow-%s.png">' % # # (p, nr, p, p)) # if dashlet.get("title"): # url = dashlet.get("title_url", None) # if url: # title = '<a href="%s">%s</a>' % (url, dashlet["title"]) # else: # title = dashlet["title"] # html.write('<div class="title" id="dashlet_title_%d">%s</div>' % (nr, title)) # if dashlet.get("background", True): # bg = " background" # else: # bg = "" # html.write('<div class="dashlet_inner%s" id="dashlet_inner_%d">' % (bg, nr)) # html.write( "%s" % dashlet) # # Optional way to render a dynamic iframe URL if "iframefunc" in dashlet: dashlet["iframe"] = dashlet["iframefunc"]() # # The method "view" is a shortcut for "iframe" with a certain url if "view" in dashlet: dashlet["iframe"] = "view.py?view_name=%s&_display_options=HRSIXL&_body_class=dashlet" % 
dashlet["view"] html.write(""" <div class="box border"> <div class="box-title"> <h4><i class="fa fa-adjust"></i>%s</h4> <div class="tools hidden-xs"> <a class="config" data-toggle="modal" href="#box-config"> <i class="fa fa-cog"></i> </a> </div> </div> <div class="box-body"> <div class="chart" id="dashlet_%s" style="padding: 0px; position: relative;"> """ % (dashlet["title"], nr)) # # The content is rendered only if it is fixed. In the # # other cases the initial (re)-size will paint the content. if "content" in dashlet: # fixed content html.write(dashlet["content"]) elif "iframe" in dashlet: # fixed content containing iframe # Fix of iPad >:-P html.write('<div style="width: 100%; height: 100%; -webkit-overflow-scrolling:touch; overflow: auto;">') html.write('<iframe allowTransparency="true" frameborder="0" width="100%%" height="100%%" src="%s"></iframe>' % add_wato_folder_to_url(dashlet["iframe"], wato_folder)) html.write('</div>') html.write('<div class="dashlet_inner" id="dashlet_inner_%d">' % (nr)) html.write("</div>") html.write(""" </div> </div> </div> """ ) html.write("</div>\n") # Here comes the brain stuff: An intelligent liquid layout algorithm. # It is called via ajax, mainly because I was not eager to code this # directly in Javascript (though this would be possible and probably # more lean.) # Compute position and size of all dashlets def ajax_resize(): # computation with vectors class vec: def __init__(self, xy): self._data = xy def __div__(self, xy): return vec((self._data[0] / xy[0], self._data[1] / xy[1])) def __repr__(self): return repr(self._data) def __getitem__(self, i): return self._data[i] def make_absolute(self, size):
# Compute the initial size of the dashlet. If MAX is used, # then the dashlet consumes all space in its growing direction, # regardless of any other dashlets. def initial_size(self, position, rastersize): n = [] for i in [0, 1]: if self._data[i] == MAX: n.append(rastersize[i] - abs(position[i]) + 1) elif self._data[i] == GROW: n.append(1) else: n.append(self._data[i]) return n def compute_grow_by(self, size): n = [] for i in [0, 1]: if size[i] != GROW: # absolute size, no growth n.append(0) elif self._data[i] < 0: n.append(-1) # grow direction left, up else: n.append(1) # grow direction right, down return n def __add__(self, b): return vec((self[0] + b[0], self[1] + b[1])) board = dashboards[html.var("name")] screensize = vec((int(html.var("width")), int(html.var("height")))) rastersize = screensize / raster used_matrix = {} # keep track of used raster elements # first place all dashlets at their absolute positions positions = [] for nr, dashlet in enumerate(board["dashlets"]): # Relative position is as noted in the declaration. 1,1 => top left origin, # -1,-1 => bottom right origin, 0 is not allowed here rel_position = vec(dashlet["position"]) # starting from 1, negative means: from right/bottom # Compute the absolute position, this time from 0 to rastersize-1 abs_position = rel_position.make_absolute(rastersize) # The size in raster-elements. A 0 for a dimension means growth. No negative values here. size = vec(dashlet["size"]) # Compute the minimum used size for the dashlet. For growth-dimensions we start with 1 used_size = size.initial_size(rel_position, rastersize) # Now compute the rectangle that is currently occupied. The choords # of bottomright are *not* included. 
if rel_position[0] > 0: left = abs_position[0] right = left + used_size[0] else: right = abs_position[0] left = right - used_size[0] if rel_position[1] > 0: top = abs_position[1] bottom = top + used_size[1] else: bottom = abs_position[1] top = bottom - used_size[1] # Allocate used squares in matrix. If not all squares we need are free, # then the dashboard is too small for all dashlets (as it seems). # TEST: Dashlet auf 0/0 setzen, wenn kein Platz dafür da ist. try: for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: raise Exception() used_matrix[(x,y)] = True # Helper variable for how to grow, both x and y in [-1, 0, 1] grow_by = rel_position.compute_grow_by(size) positions.append((nr, True, left, top, right, bottom, grow_by)) except: positions.append((nr, False, left, top, right, bottom, (0,0))) # now resize all elastic dashlets to the max, but only # by one raster at a time, in order to be fair def try_resize(x, y, width, height): return False if x + width >= xmax or y + height >= ymax: return False for xx in range(x, x + width): for yy in range(y, y + height): if used_matrix[xx][yy]: return False for xx in range(x, x + width): for yy in range(y, y + height): used_matrix[xx][yy] = True return True # Das hier ist FALSCH! In Wirklichkeit muss ich nur prüfen, # ob der *Zuwachs* nicht in der Matrix belegt ist. Das jetzige # Rechteck muss ich ausklammern. Es ist ja schon belegt. 
def try_allocate(left, top, right, bottom): # Try if all needed squares are free for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: return False # Allocate all needed squares for x in range(left, right): for y in range(top, bottom): used_matrix[(x,y)] = True return True # Now try to expand all elastic rectangles as far as possible at_least_one_expanded = True while at_least_one_expanded: at_least_one_expanded = False new_positions = [] for (nr, visible, left, top, right, bottom, grow_by) in positions: if visible: # html.write(repr((nr, left, top, right, bottom, grow_by))) # try to grow in X direction by one if grow_by[0] > 0 and right < rastersize[0] and try_allocate(right, top, right+1, bottom): at_least_one_expanded = True right += 1 elif grow_by[0] < 0 and left > 0 and try_allocate(left-1, top, left, bottom): at_least_one_expanded = True left -= 1 # try to grow in Y direction by one if grow_by[1] > 0 and bottom < rastersize[1] and try_allocate(left, bottom, right, bottom+1): at_least_one_expanded = True bottom += 1 elif grow_by[1] < 0 and top > 0 and try_allocate(left, top-1, right, top): at_least_one_expanded = True top -= 1 new_positions.append((nr, visible, left, top, right, bottom, grow_by)) positions = new_positions resize_info = [] for nr, visible, left, top, right, bottom, grow_by in positions: # html.write(repr((nr, left, top, right, bottom, grow_by))) # html.write("<br>") title = board["dashlets"][nr].get("title") if title: th = title_height else: th = 0 resize_info.append([nr, visible and 1 or 0, left * raster[0], top * raster[1] + th, (right - left) * raster[0], (bottom - top) * raster[1] - th]) html.write(repr(resize_info)) def dashlet_overview(): html.write( '<table class=dashlet_overview>' '<tr><td valign=top>' '<a href="http://mathias-kettner.de/check_mk.html"><img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>' '</td>' '<td><h2>Check_MK Multisite</h2>' 'Welcome to Check_MK Multisite. 
If you want to learn more about Multsite, please visit ' 'our <a href="http://mathias-kettner.de/checkmk_multisite.html">online documentation</a>. ' 'Multisite is part of <a href="http://mathias-kettner.de/check_mk.html">Check_MK</a> - an Open Source ' 'project by <a href="http://mathias-kettner.de">Mathias Kettner</a>.' '</td>' ) html.write('</tr></table>') def dashlet_mk_logo(): html.write('<a href="http://mathias-kettner.de/check_mk.html">' '<img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>') def dashlet_hoststats(): table = [ ( _("Up"), "#0b3", "searchhost&is_host_scheduled_downtime_depth=0&hst0=on", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Down"), "#f00", "searchhost&is_host_scheduled_downtime_depth=0&hst1=on", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Unreachable"), "#f80", "searchhost&is_host_scheduled_downtime_depth=0&hst2=on", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("In Downtime"), "#0af", "searchhost&search=1&is_host_scheduled_downtime_depth=1", "Stats: scheduled_downtime_depth > 0\n" \ ) ] filter = "Filter: custom_variable_names < _REALNAME\n" render_statistics("hoststats", "hosts", table, filter, "Host Statistics") def dashlet_servicestats(): table = [ ( _("OK"), "#0b3", "searchsvc&hst0=on&st0=on&is_in_downtime=0", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("In Downtime"), "#0af", "searchsvc&is_in_downtime=1", "Stats: scheduled_downtime_depth > 0\n" \ "Stats: host_scheduled_downtime_depth > 0\n" \ "StatsOr: 2\n"), ( _("On Down host"), "#048", "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0", "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state != 0\n" \ "StatsAnd: 3\n"), ( 
_("Warning"), "#ff0", "searchsvc&hst0=on&st1=on&is_in_downtime=0", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Unknown"), "#f80", "searchsvc&hst0=on&st3=on&is_in_downtime=0", "Stats: state = 3\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Critical"), "#f00", "searchsvc&hst0=on&st2=on&is_in_downtime=0", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ] filter = "Filter: host_custom_variable_names < _REALNAME\n" render_statistics("servicestats", "services", table, filter, "Service Statistics") def render_statistics(pie_id, what, table, filter, title=None): html.write("<div class=stats>") pie_diameter = 130 pie_left_aspect = 0.5 pie_right_aspect = 0.8 # Is the query restricted to a certain WATO-path? wato_folder = html.var("wato_folder") if wato_folder: # filter += "Filter: host_state = 0" filter += "Filter: host_filename ~ ^/wato/%s/\n" % wato_folder.replace("\n", "") # Is the query restricted to a host contact group? host_contact_group = html.var("host_contact_group") if host_contact_group: filter += "Filter: host_contact_groups >= %s\n" % host_contact_group.replace("\n", "") # Is the query restricted to a service contact group? 
service_contact_group = html.var("service_contact_group") if service_contact_group: filter += "Filter: service_contact_groups >= %s\n" % service_contact_group.replace("\n", "") query = "GET %s\n" % what for entry in table: query += entry[3] query += filter result = html.live.query_summed_stats(query) pies = zip(table, result) total = sum([x[1] for x in pies]) html.write('<div class=pie width=%d height=%d id="%s_stats" style="float: left"></div>' % (pie_diameter, pie_diameter, pie_id)) # html.write('<img src="images/globe.png" class="globe">') # html.write('<table class="hoststats%s" style="float:left">' % ( # len(pies) > 1 and " narrow" or "")) # table_entries = pies # while len(table_entries) < 6: # table_entries = table_entries + [ (("", "#95BBCD", "", ""), "&nbsp;") ] # table_entries.append(((_("Total"), "", "all%s" % what, ""), total)) # for (name, color, viewurl, query), count in table_entries: # url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" \ # + htmllib.urlencode(html.var("wato_folder", "")) # if host_contact_group: # url += '&opthost_contactgroup=' + host_contact_group # if service_contact_group: # url += '&optservice_contactgroup=' + service_contact_group # html.write('<tr><th><a href="%s">%s</a></th>' % (url, name)) # style = '' # if color: # style = ' style="background-color: %s"' % color # html.write('<td class=color%s>' # '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count)) # html.write("</table>") data = [] for pie in pies: typeof = pie[0][0] count = pie[1] data.append([ typeof, count]) html.write("</div>") html.javascript(""" draw_hchart(id="%s", type="pie" , title="%s", name="%s", data=%s); """ % (pie_id, title, title, data)) def dashlet_pnpgraph(): render_pnpgraph( html.var("site"), html.var("host"), html.var("service"), int(html.var("source", 0)), int(html.var("view", 0)), ) def dashlet_nodata(): html.write("<div class=nograph><div class=msg>") html.write(html.var("message", _("No data available."))) 
html.write("</div></div>") def render_pnpgraph(site, host, service = None, source = 0, view = 0): if not host: html.message("Invalid URL to this dashlet. Missing <tt>host</tt>") return; if not service: service = "_HOST_" if not site: base_url = defaults.url_prefix else: base_url = html.site_status[site]["site"]["url_prefix"] base_url += "pnp4nagios/index.php/" var_part = "?host=%s&srv=%s&view=0&source=%d&view=%d&theme=multisite&_t=%d" % \ (pnp_cleanup(host), pnp_cleanup(service), source, view, int(time.time())) pnp_url = base_url + "graph" + var_part img_url = base_url + "image" + var_part html.write('<a href="%s"><img border=0 src="%s"></a>' % (pnp_url, img_url)) # load_plugins()
n = [] for i in [0, 1]: if self._data[i] < 0: n.append(size[i] + self._data[i] + 1) # Here was a bug fixed by Markus Lengler else: n.append(self._data[i] - 1) # make begin from 0 return vec(n)
identifier_body
dashboard.py
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # ails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. import config, defaults, htmllib, pprint, time from lib import * import wato # Python 2.3 does not have 'set' in normal namespace. # But it can be imported from 'sets' try: set() except NameError: from sets import Set as set loaded_with_language = False builtin_dashboards = {} # Declare constants to be used in the definitions of the dashboards GROW = 0 MAX = -1 # These settings might go into the config module, sometime in future, # in order to allow the user to customize this. 
header_height = 60 # Distance from top of the screen to the lower border of the heading screen_margin = 5 # Distance from the left border of the main-frame to the dashboard area dashlet_padding = 21, 5, 5, 0 # Margin (N, E, S, W) between outer border of dashlet and its content corner_overlap = 22 title_height = 0 # Height of dashlet title-box raster = 10, 10 # Raster the dashlet choords are measured in # Load plugins in web/plugins/dashboard and declare permissions, # note: these operations produce language-specific results and # thus must be reinitialized everytime a language-change has # been detected. def load_plugins(): global loaded_with_language if loaded_with_language == current_language: return # Permissions are currently not being defined. That will be the # case as soon as dashboards become editable. # Load plugins for dashboards. Currently these files # just may add custom dashboards by adding to builtin_dashboards. load_web_plugins("dashboard", globals()) # This must be set after plugin loading to make broken plugins raise # exceptions all the time and not only the first time (when the plugins # are loaded). loaded_with_language = current_language # In future there will be user editable dashboards just like # views which will be loaded. Currently we only use the builtin # dashboads. global dashboards dashboards = builtin_dashboards # HTML page handler for generating the (a) dashboard. The name # of the dashboard to render is given in the HTML variable 'name'. # This defaults to "main". def page_dashboard(): name = html.var("name", "main") if name not in dashboards: raise MKGeneralException("No such dashboard: '<b>%s</b>'" % name) render_dashboard(name) def add_wato_folder_to_url(url, wato_folder): if not wato_folder: return url elif '/' in url: return url # do not append wato_folder to non-Check_MK-urls elif '?' 
in url: return url + "&wato_folder=" + htmllib.urlencode(wato_folder) else: return url + "?wato_folder=" + htmllib.urlencode(wato_folder) # Actual rendering function def render_dashboard(name): board = dashboards[name] # The dashboard may be called with "wato_folder" set. In that case # the dashboard is assumed to restrict the shown data to a specific # WATO subfolder or file. This could be a configurable feature in # future, but currently we assume, that *all* dashboards are filename # sensitive. wato_folder = html.var("wato_folder") # When an empty wato_folder attribute is given a user really wants # to see only the hosts contained in the root folder. So don't ignore # the root folder anymore. #if not wato_folder: # ignore wato folder in case of root folder # wato_folder = None # The title of the dashboard needs to be prefixed with the WATO path, # in order to make it clear to the user, that he is seeing only partial # data. title = board["title"] global header_height if title is None: # If the title is none, hide the header line html.set_render_headfoot(False) header_height = 0 title = '' elif wato_folder is not None: title = wato.api.get_folder_title(wato_folder) + " - " + title stylesheets = ["pages", "dashboard", "status", "views"] stylesheets += ["theme/css/cloud-admin", "theme/css/themes/default", "theme/css/responsive", "theme/font-awesome/css/font-awesome.min", "theme/js/bootstrap-daterangepicker/daterangepicker-bs3", "theme/js/hubspot-messenger/css/messenger.min", "theme/js/hubspot-messenger/css/messenger-spinner.min", "theme/js/hubspot-messenger/css/messenger-theme-flat.min", "theme/js/jquery-ui-1.10.3.custom/css/custom-theme/jquery-ui-1.10.3.custom.min", "theme/js/bootstrap-switch/bootstrap-switch.min", "theme/css/flags/flags.min", "theme/css/fonts" ] javascripts = [] javascripts += ["dashboard"] html.header(title, javascripts=javascripts, stylesheets=stylesheets) html.write("""<div class="col-lg-12 margin-top-50" id="content">""") result = """ <!-- 
PAGE HEADER--> <div class="row"> <div class="col-sm-12"> <div class="page-header"> <!-- STYLER --> <!-- /STYLER --> <!-- BREADCRUMBS --> <ul class="breadcrumb"> <li> <i class="fa fa-home"></i> </li> <li>Dashboard</li> </ul> <!-- /BREADCRUMBS --> <div class="clearfix"> <h3 class="content-title pull-left">Dashboard</h3> </div> <div class="description">Blank Page</div> </div> </div> </div> <!-- /PAGE HEADER --> """ html.write(result) html.write(""" <div class="row"> <div class="col-sm-12"> """) # html.write("<div id=dashboard class=\"dashboard_%s\">\n" % name) # Container of all dashlets refresh_dashlets = [] # Dashlets with automatic refresh, for Javascript for nr, dashlet in enumerate(board["dashlets"]): # dashlets using the 'urlfunc' method will dynamically compute # an url (using HTML context variables at their wish). if "urlfunc" in dashlet: dashlet["url"] = dashlet["urlfunc"]() # dashlets using the 'url' method will be refreshed by us. Those # dashlets using static content (such as an iframe) will not be # refreshed by us but need to do that themselves. if "url" in dashlet: refresh_dashlets.append([nr, dashlet.get("refresh", 0), str(add_wato_folder_to_url(dashlet["url"], wato_folder))]) # Paint the dashlet's HTML code render_dashlet(nr, dashlet, wato_folder) html.write("</div>\n") html.write(""" </div> </div> """) # html.write("""</div>""") # Put list of all autorefresh-dashlets into Javascript and also make sure, # that the dashbaord is painted initially. The resize handler will make sure # that every time the user resizes the browser window the layout will be re-computed # and all dashlets resized to their new positions and sizes. 
html.javascript(""" //var header_height = %d; //var screen_margin = %d; //var title_height = %d; //var dashlet_padding = Array%s; //var corner_overlap = %d; var refresh_dashlets = %r; //var dashboard_name = '%s'; //set_dashboard_size(); //window.onresize = function () { set_dashboard_size(); } //window.onload = function () { set_dashboard_size(); } dashboard_scheduler(1); """ % (header_height, screen_margin, title_height, dashlet_padding, corner_overlap, refresh_dashlets, name)) html.body_end() # omit regular footer with status icons, etc. # Create the HTML code for one dashlet. Each dashlet has an id "dashlet_%d", # where %d is its index (in board["dashlets"]). Javascript uses that id # for the resizing. Within that div there is an inner div containing the # actual dashlet content. The margin between the inner and outer div is # used for stylish layout stuff (shadows, etc.) def render_dashlet(nr, dashlet, wato_folder): html.write(""" <div class="col-md-6"> """) # html.write('<div class=dashlet id="dashlet_%d">' % nr) # # render shadow # # if dashlet.get("shadow", True): # # for p in [ "nw", "ne", "sw", "se", "n", "s", "w", "e" ]: # # html.write('<img id="dashadow_%s_%d" class="shadow %s" src="images/dashadow-%s.png">' % # # (p, nr, p, p)) # if dashlet.get("title"): # url = dashlet.get("title_url", None) # if url: # title = '<a href="%s">%s</a>' % (url, dashlet["title"]) # else: # title = dashlet["title"] # html.write('<div class="title" id="dashlet_title_%d">%s</div>' % (nr, title)) # if dashlet.get("background", True): # bg = " background" # else: # bg = "" # html.write('<div class="dashlet_inner%s" id="dashlet_inner_%d">' % (bg, nr)) # html.write( "%s" % dashlet) # # Optional way to render a dynamic iframe URL if "iframefunc" in dashlet: dashlet["iframe"] = dashlet["iframefunc"]() # # The method "view" is a shortcut for "iframe" with a certain url if "view" in dashlet: dashlet["iframe"] = "view.py?view_name=%s&_display_options=HRSIXL&_body_class=dashlet" % 
dashlet["view"] html.write(""" <div class="box border"> <div class="box-title"> <h4><i class="fa fa-adjust"></i>%s</h4> <div class="tools hidden-xs"> <a class="config" data-toggle="modal" href="#box-config"> <i class="fa fa-cog"></i> </a> </div> </div> <div class="box-body"> <div class="chart" id="dashlet_%s" style="padding: 0px; position: relative;"> """ % (dashlet["title"], nr)) # # The content is rendered only if it is fixed. In the # # other cases the initial (re)-size will paint the content. if "content" in dashlet: # fixed content html.write(dashlet["content"]) elif "iframe" in dashlet: # fixed content containing iframe # Fix of iPad >:-P html.write('<div style="width: 100%; height: 100%; -webkit-overflow-scrolling:touch; overflow: auto;">') html.write('<iframe allowTransparency="true" frameborder="0" width="100%%" height="100%%" src="%s"></iframe>' % add_wato_folder_to_url(dashlet["iframe"], wato_folder)) html.write('</div>') html.write('<div class="dashlet_inner" id="dashlet_inner_%d">' % (nr)) html.write("</div>") html.write(""" </div> </div> </div> """ ) html.write("</div>\n") # Here comes the brain stuff: An intelligent liquid layout algorithm. # It is called via ajax, mainly because I was not eager to code this # directly in Javascript (though this would be possible and probably # more lean.) # Compute position and size of all dashlets def ajax_resize(): # computation with vectors class vec: def __init__(self, xy): self._data = xy def __div__(self, xy): return vec((self._data[0] / xy[0], self._data[1] / xy[1])) def __repr__(self): return repr(self._data) def __getitem__(self, i): return self._data[i] def make_absolute(self, size): n = [] for i in [0, 1]: if self._data[i] < 0: n.append(size[i] + self._data[i] + 1) # Here was a bug fixed by Markus Lengler else: n.append(self._data[i] - 1) # make begin from 0 return vec(n) # Compute the initial size of the dashlet. 
If MAX is used, # then the dashlet consumes all space in its growing direction, # regardless of any other dashlets. def initial_size(self, position, rastersize): n = [] for i in [0, 1]: if self._data[i] == MAX: n.append(rastersize[i] - abs(position[i]) + 1) elif self._data[i] == GROW: n.append(1) else: n.append(self._data[i]) return n def compute_grow_by(self, size): n = [] for i in [0, 1]: if size[i] != GROW: # absolute size, no growth n.append(0) elif self._data[i] < 0: n.append(-1) # grow direction left, up else: n.append(1) # grow direction right, down return n def __add__(self, b): return vec((self[0] + b[0], self[1] + b[1])) board = dashboards[html.var("name")] screensize = vec((int(html.var("width")), int(html.var("height")))) rastersize = screensize / raster used_matrix = {} # keep track of used raster elements # first place all dashlets at their absolute positions positions = [] for nr, dashlet in enumerate(board["dashlets"]): # Relative position is as noted in the declaration. 1,1 => top left origin, # -1,-1 => bottom right origin, 0 is not allowed here rel_position = vec(dashlet["position"]) # starting from 1, negative means: from right/bottom # Compute the absolute position, this time from 0 to rastersize-1 abs_position = rel_position.make_absolute(rastersize) # The size in raster-elements. A 0 for a dimension means growth. No negative values here. size = vec(dashlet["size"]) # Compute the minimum used size for the dashlet. For growth-dimensions we start with 1 used_size = size.initial_size(rel_position, rastersize) # Now compute the rectangle that is currently occupied. The choords # of bottomright are *not* included. if rel_position[0] > 0: left = abs_position[0] right = left + used_size[0] else: right = abs_position[0] left = right - used_size[0] if rel_position[1] > 0: top = abs_position[1] bottom = top + used_size[1] else: bottom = abs_position[1] top = bottom - used_size[1] # Allocate used squares in matrix. 
If not all squares we need are free, # then the dashboard is too small for all dashlets (as it seems). # TEST: Dashlet auf 0/0 setzen, wenn kein Platz dafür da ist. try: for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: raise Exception() used_matrix[(x,y)] = True # Helper variable for how to grow, both x and y in [-1, 0, 1] grow_by = rel_position.compute_grow_by(size) positions.append((nr, True, left, top, right, bottom, grow_by)) except: positions.append((nr, False, left, top, right, bottom, (0,0))) # now resize all elastic dashlets to the max, but only # by one raster at a time, in order to be fair def try_resize(x, y, width, height): return False if x + width >= xmax or y + height >= ymax: return False for xx in range(x, x + width): for yy in range(y, y + height): if used_matrix[xx][yy]: return False for xx in range(x, x + width): for yy in range(y, y + height): used_matrix[xx][yy] = True return True # Das hier ist FALSCH! In Wirklichkeit muss ich nur prüfen, # ob der *Zuwachs* nicht in der Matrix belegt ist. Das jetzige # Rechteck muss ich ausklammern. Es ist ja schon belegt. 
def try_allocate(left, top, right, bottom): # Try if all needed squares are free for x in range(left, right): for y in range(top, bottom): if (x,y) in used_matrix: return False # Allocate all needed squares for x in range(left, right): for y in range(top, bottom): used_matrix[(x,y)] = True return True # Now try to expand all elastic rectangles as far as possible at_least_one_expanded = True while at_least_one_expanded: at_least_one_expanded = False new_positions = [] for (nr, visible, left, top, right, bottom, grow_by) in positions: if visible: # html.write(repr((nr, left, top, right, bottom, grow_by))) # try to grow in X direction by one if grow_by[0] > 0 and right < rastersize[0] and try_allocate(right, top, right+1, bottom): at_least_one_expanded = True right += 1 elif grow_by[0] < 0 and left > 0 and try_allocate(left-1, top, left, bottom): at_least_one_expanded = True left -= 1 # try to grow in Y direction by one if grow_by[1] > 0 and bottom < rastersize[1] and try_allocate(left, bottom, right, bottom+1): at_least_one_expanded = True bottom += 1 elif grow_by[1] < 0 and top > 0 and try_allocate(left, top-1, right, top): at_least_one_expanded = True top -= 1 new_positions.append((nr, visible, left, top, right, bottom, grow_by)) positions = new_positions resize_info = [] for nr, visible, left, top, right, bottom, grow_by in positions: # html.write(repr((nr, left, top, right, bottom, grow_by))) # html.write("<br>") title = board["dashlets"][nr].get("title") if title: th = title_height else: th = 0 resize_info.append([nr, visible and 1 or 0, left * raster[0], top * raster[1] + th, (right - left) * raster[0], (bottom - top) * raster[1] - th]) html.write(repr(resize_info)) def dashlet_overview(): html.write( '<table class=dashlet_overview>' '<tr><td valign=top>' '<a href="http://mathias-kettner.de/check_mk.html"><img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>' '</td>' '<td><h2>Check_MK Multisite</h2>' 'Welcome to Check_MK Multisite. 
If you want to learn more about Multsite, please visit ' 'our <a href="http://mathias-kettner.de/checkmk_multisite.html">online documentation</a>. ' 'Multisite is part of <a href="http://mathias-kettner.de/check_mk.html">Check_MK</a> - an Open Source ' 'project by <a href="http://mathias-kettner.de">Mathias Kettner</a>.' '</td>' ) html.write('</tr></table>') def dashlet_mk_logo(): html.write('<a href="http://mathias-kettner.de/check_mk.html">' '<img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>') def dashlet_hoststats(): table = [ ( _("Up"), "#0b3", "searchhost&is_host_scheduled_downtime_depth=0&hst0=on", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Down"), "#f00", "searchhost&is_host_scheduled_downtime_depth=0&hst1=on", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("Unreachable"), "#f80", "searchhost&is_host_scheduled_downtime_depth=0&hst2=on", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "StatsAnd: 2\n"), ( _("In Downtime"), "#0af", "searchhost&search=1&is_host_scheduled_downtime_depth=1", "Stats: scheduled_downtime_depth > 0\n" \ ) ] filter = "Filter: custom_variable_names < _REALNAME\n" render_statistics("hoststats", "hosts", table, filter, "Host Statistics") def dashlet_servicestats(): table = [ ( _("OK"), "#0b3", "searchsvc&hst0=on&st0=on&is_in_downtime=0", "Stats: state = 0\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("In Downtime"), "#0af", "searchsvc&is_in_downtime=1", "Stats: scheduled_downtime_depth > 0\n" \ "Stats: host_scheduled_downtime_depth > 0\n" \ "StatsOr: 2\n"), ( _("On Down host"), "#048", "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0", "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state != 0\n" \ "StatsAnd: 3\n"), ( 
_("Warning"), "#ff0", "searchsvc&hst0=on&st1=on&is_in_downtime=0", "Stats: state = 1\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Unknown"), "#f80", "searchsvc&hst0=on&st3=on&is_in_downtime=0", "Stats: state = 3\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ( _("Critical"), "#f00", "searchsvc&hst0=on&st2=on&is_in_downtime=0", "Stats: state = 2\n" \ "Stats: scheduled_downtime_depth = 0\n" \ "Stats: host_scheduled_downtime_depth = 0\n" \ "Stats: host_state = 0\n" \ "Stats: host_has_been_checked = 1\n" \ "StatsAnd: 5\n"), ] filter = "Filter: host_custom_variable_names < _REALNAME\n" render_statistics("servicestats", "services", table, filter, "Service Statistics") def render_statistics(pie_id, what, table, filter, title=None): html.write("<div class=stats>") pie_diameter = 130 pie_left_aspect = 0.5 pie_right_aspect = 0.8 # Is the query restricted to a certain WATO-path? wato_folder = html.var("wato_folder") if wato_folder: # filter += "Filter: host_state = 0" filter += "Filter: host_filename ~ ^/wato/%s/\n" % wato_folder.replace("\n", "") # Is the query restricted to a host contact group? host_contact_group = html.var("host_contact_group") if host_contact_group: filter += "Filter: host_contact_groups >= %s\n" % host_contact_group.replace("\n", "") # Is the query restricted to a service contact group? 
service_contact_group = html.var("service_contact_group") if service_contact_group: filter += "Filter: service_contact_groups >= %s\n" % service_contact_group.replace("\n", "") query = "GET %s\n" % what for entry in table: query += entry[3] query += filter result = html.live.query_summed_stats(query) pies = zip(table, result) total = sum([x[1] for x in pies]) html.write('<div class=pie width=%d height=%d id="%s_stats" style="float: left"></div>' % (pie_diameter, pie_diameter, pie_id)) # html.write('<img src="images/globe.png" class="globe">') # html.write('<table class="hoststats%s" style="float:left">' % ( # len(pies) > 1 and " narrow" or "")) # table_entries = pies # while len(table_entries) < 6: # table_entries = table_entries + [ (("", "#95BBCD", "", ""), "&nbsp;") ] # table_entries.append(((_("Total"), "", "all%s" % what, ""), total)) # for (name, color, viewurl, query), count in table_entries: # url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" \ # + htmllib.urlencode(html.var("wato_folder", "")) # if host_contact_group: # url += '&opthost_contactgroup=' + host_contact_group # if service_contact_group: # url += '&optservice_contactgroup=' + service_contact_group # html.write('<tr><th><a href="%s">%s</a></th>' % (url, name)) # style = '' # if color: # style = ' style="background-color: %s"' % color # html.write('<td class=color%s>' # '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count)) # html.write("</table>") data = [] for pie in pies: typeof = pie[0][0] count = pie[1] data.append([ typeof, count]) html.write("</div>") html.javascript(""" draw_hchart(id="%s", type="pie" , title="%s", name="%s", data=%s); """ % (pie_id, title, title, data)) def dashlet_pnpgraph(): render_pnpgraph( html.var("site"), html.var("host"), html.var("service"), int(html.var("source", 0)), int(html.var("view", 0)), ) def dashlet_nodata(): html.write("<div class=nograph><div class=msg>") html.write(html.var("message", _("No data available."))) 
html.write("</div></div>") def render_pnpgraph(site, host, service = None, source = 0, view = 0): if not host: html.message("Invalid URL to this dashlet. Missing <tt>host</tt>") return; if not service: service = "_HOST_" if not site: base_url = defaults.url_prefix else: base_url = html.site_status[site]["site"]["url_prefix"] base_url += "pnp4nagios/index.php/" var_part = "?host=%s&srv=%s&view=0&source=%d&view=%d&theme=multisite&_t=%d" % \ (pnp_cleanup(host), pnp_cleanup(service), source, view, int(time.time())) pnp_url = base_url + "graph" + var_part
html.write('<a href="%s"><img border=0 src="%s"></a>' % (pnp_url, img_url)) # load_plugins()
img_url = base_url + "image" + var_part
random_line_split
update.go
package nodes import ( "chiastat/chia/network" "chiastat/chia/types" chiautils "chiastat/chia/utils" "chiastat/utils" "context" "encoding/binary" "encoding/hex" "flag" "io" "log" "net" "net/http" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/abh/geoip" "github.com/ansel1/merry" "github.com/go-pg/pg/v10" pgtypes "github.com/go-pg/pg/v10/types" "github.com/gorilla/websocket" ) func joinHostPort(host string, port uint16) string { return net.JoinHostPort(host, strconv.Itoa(int(port))) } func joinHostPortToHashKey(host string, port uint16) string { var buf []byte if ip := net.ParseIP(host); ip != nil { if ip4 := ip.To4(); ip4 != nil { buf = make([]byte, 4+2) copy(buf, ip4) } else { buf = make([]byte, 16+2) copy(buf, ip) } } else { buf = make([]byte, len(host)+3) //extra zero as separator copy(buf, []byte(host)) } binary.BigEndian.PutUint16(buf[len(buf)-2:], port) return string(buf) } func askIP() (string, error) { resp, err := http.Get("https://checkip.amazonaws.com/") if err != nil { return "", merry.Wrap(err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", merry.Wrap(err) } return strings.TrimSpace(string(body)), nil } type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } type ConnList struct { limit int64 length int64 start *ConnListItem end *ConnListItem mutex sync.Mutex } func (l *ConnList) PushConn(c *network.WSChiaConnection) *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() item := &ConnListItem{prev: l.end, conn: c, stop: make(chan struct{}, 1)} if l.end != nil { l.end.next = item } l.end = item if l.start == nil { l.start = item } l.length += 1 return item } func (l *ConnList) delItemNoLock(item *ConnListItem) { next := item.next prev := item.prev if l.start == item { l.start = next } else { item.prev.next = next } if l.end == item { l.end = prev } else { item.next.prev = prev } item.prev = nil item.next = nil l.length -= 1 } func (l 
*ConnList) DelItemUnlesRemoved(item *ConnListItem) { l.mutex.Lock() defer l.mutex.Unlock() if item.prev == nil && item.next == nil && l.start != item { return } l.delItemNoLock(item) } func (l *ConnList) ShiftIfNeed() *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() if l.length <= l.limit { return nil } if l.start == nil { return nil } item := l.start l.delItemNoLock(item) return item } type NodeAddr struct { Host string Port uint16 Country *string } type Node struct { ID []byte Host string Port uint16 ProtocolVersion string SoftwareVersion string NodeType string Country *string } type NodeAddrListAsPGTuple []*NodeAddr func (l NodeAddrListAsPGTuple) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go for i, item := range l { if i > 0 { b = append(b, ',') } b = append(b, '(') b = pgtypes.AppendString(b, item.Host, 1) //quoteFlag=1 b = append(b, ',') b = append(b, []byte(strconv.FormatInt(int64(item.Port), 10))...) b = append(b, ')') } return b, nil } type NodeListAsPGIDs []*Node func (l NodeListAsPGIDs) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go idBuf := make([]byte, 80) for i, item := range l { if i > 0 { b = append(b, ',') } idLen := hex.Encode(idBuf, item.ID) b = append(b, []byte("'\\x")...) b = append(b, idBuf[:idLen]...) 
b = append(b, '\'') } return b, nil } func startOldNodesLoader(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) rawNodesChunkSize := chunkSize nodesChunkSize := chunkSize / 8 if nodesChunkSize == 0 { nodesChunkSize = 1 } go func() { defer worker.Done() ctx := context.Background() nodes := make([]*Node, nodesChunkSize) rawNodes := make([]*NodeAddr, rawNodesChunkSize) for { nodes := nodes[:0] rawNodes := rawNodes[:0] var getDur, updDur int64 var getDurRaw, updDurRaw int64 err := db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&rawNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? FOR NO KEY UPDATE`, chunkSize) getDurRaw = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(rawNodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE raw_nodes SET checked_at = NOW() WHERE (host,port) IN (?)`, NodeAddrListAsPGTuple(rawNodes)) updDurRaw = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } err = db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&nodes, ` SELECT id, host, port FROM nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? 
FOR NO KEY UPDATE`, chunkSize) getDur = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(nodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE nodes SET checked_at = NOW() WHERE id IN (?)`, NodeListAsPGIDs(nodes)) updDur = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } log.Printf("LOAD: nodes=%d (get:%dms, upd:%dms), raw=%d (get:%dms, upd:%dms)", len(nodes), getDur/1000/1000, updDur/1000/1000, len(rawNodes), getDurRaw/1000/1000, updDurRaw/1000/1000) if len(nodes) == 0 && len(rawNodes) == 0 { time.Sleep(10 * time.Second) } for _, node := range rawNodes { nodesChan <- node } for _, node := range nodes { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} } } }() return worker } func startNodesChecker(db *pg.DB, sslDir string, nodesInChan chan *NodeAddr, nodesOutChan chan *Node, rawNodesOutChan chan []types.TimestampedPeerInfo, concurrency int) utils.Worker { worker := utils.NewSimpleWorker(concurrency) var totalCount int64 = 0 var totalCountOk int64 = 0 var stampCount int64 = 0 stamp := time.Now().UnixNano() logPrint := utils.NewSyncInterval(10*time.Second, func() { curStampCount := atomic.SwapInt64(&stampCount, 0) curStamp := atomic.SwapInt64(&stamp, time.Now().UnixNano()) log.Printf("CHECK: nodes checked: %d, ok: %d, rpm: %.2f", atomic.LoadInt64(&totalCount), atomic.LoadInt64(&totalCountOk), float64(curStampCount*60*1000*1000*1000)/float64(time.Now().UnixNano()-curStamp)) }) for i := 0; i < concurrency; i++ { go func() { defer worker.Done() tlsCfg, err := network.MakeTSLConfigFromFiles( sslDir+"/ca/chia_ca.crt", sslDir+"/full_node/public_full_node.crt", sslDir+"/full_node/public_full_node.key") if err != nil { worker.AddError(err) return } handleNode := func(node *NodeAddr) error { cfg := &network.WSChiaConnConfig{Dialer: &websocket.Dialer{ HandshakeTimeout: 5 * time.Second, }} c, err := 
network.ConnectTo(joinHostPort(node.Host, node.Port), tlsCfg, cfg) if err != nil { return merry.Wrap(err) } hs, err := c.PerformHandshake() if err != nil { return merry.Wrap(err) } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) if len(nodesOutChan) == cap(nodesOutChan) { time.Sleep(time.Millisecond) //throttling } nodesOutChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; i < 3; i++ { peers, err := c.RequestPeers() if err != nil { break } if len(rawNodesOutChan) == cap(rawNodesOutChan) { time.Sleep(time.Millisecond) //throttling } rawNodesOutChan <- peers.PeerList } return nil } for node := range nodesInChan { err := handleNode(node) if err == nil { atomic.AddInt64(&totalCountOk, 1) } atomic.AddInt64(&stampCount, 1) atomic.AddInt64(&totalCount, 1) logPrint.Trigger() } }() } return worker } func startRawNodesFilter(db *pg.DB, nodeChunksChan chan []types.TimestampedPeerInfo, nodesChan chan *NodeAddr) utils.Worker { worker := utils.NewSimpleWorker(2) cleanupInterval := int64(10 * 60) updateInterval := int64(60 * 60) go func() { defer worker.Done() nodeStamps := make(map[string]int64) lastCleanupStamp := time.Now().Unix() chunksCount := 0 countUsed := 0 countTotal := 0 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("FILTER: use ratio: %.1f%%, raw nodes in filter: %d", float64(countUsed*100)/float64(countTotal), len(nodeStamps)) countUsed = 0 countTotal = 0 }) prefillNodes := make([]*NodeAddr, 500*1000) _, err := db.Query(&prefillNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND updated_at > now() - ? 
* INTERVAL '1 second' ORDER BY checked_at ASC NULLS FIRST LIMIT ?`, updateInterval, len(prefillNodes)) if err == nil { now := time.Now().Unix() for i, node := range prefillNodes { nodeStamps[joinHostPortToHashKey(node.Host, node.Port)] = now - updateInterval*int64(i)/int64(len(prefillNodes)) } log.Printf("FILTER: prefilled with %d node(s)", len(nodeStamps)) } else { log.Printf("FILTER: failed to prefill: %s", err) } for chunk := range nodeChunksChan { now := time.Now().Unix() if now-lastCleanupStamp > cleanupInterval { count := 0 for addr, stamp := range nodeStamps { if now-stamp > updateInterval { delete(nodeStamps, addr) count += 1 } } log.Printf("FILTER: raw nodes cleanup: %d removed, %d remaining", count, len(nodeStamps)) lastCleanupStamp = now } for _, node := range chunk { key := joinHostPortToHashKey(node.Host, node.Port) if stamp, ok := nodeStamps[key]; !ok || now-stamp > updateInterval { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} nodeStamps[key] = now countUsed += 1 } } countTotal += len(chunk) chunksCount += 1 logPrint.Trigger() } }() return worker } func tryGetCountry(gdb, gdb6 *geoip.GeoIP, host string, tryResolve bool) *string { hostIP := net.ParseIP(host) if hostIP == nil { if tryResolve { addrs, _ := net.LookupHost(host) ipFound := false for _, addr := range addrs { hostIP = net.ParseIP(addr) if hostIP != nil { host = addr ipFound = true break } } if !ipFound { return nil } } else { return nil } } if hostIP.To4() == nil { if code, _ := gdb6.GetCountry_v6(host); code != "" { return &code } } else { if code, _ := gdb.GetCountry(host); code != "" { return &code } } return nil } func startNodesLocationChecker(gdb, gdb6 *geoip.GeoIP, nodesIn, nodesOut chan *Node, rawNodesIn, rawNodesOut chan *NodeAddr, numWorkers int) utils.Worker { worker := utils.NewSimpleWorker(2 * numWorkers) for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range nodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, true) nodesOut <- 
node } close(nodesOut) }() } for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range rawNodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, false) rawNodesOut <- node } close(rawNodesOut) }() } return worker } func startNodesSaver(db *pg.DB, nodesChan chan *Node, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*Node) _, err := tx.Exec(` INSERT INTO nodes (id, host, port, protocol_version, software_version, node_type, country, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, now()) ON CONFLICT (id) DO UPDATE SET host = EXCLUDED.host, port = EXCLUDED.port, protocol_version = EXCLUDED.protocol_version, software_version = EXCLUDED.software_version, node_type = EXCLUDED.node_type, country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.ID, node.Host, node.Port, node.ProtocolVersion, node.SoftwareVersion, node.NodeType, node.Country, ) if err != nil { return merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE: done") if err != nil { worker.AddError(err) } }() return worker } func startRawNodesSaver(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() 
go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE:RAW: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*NodeAddr) _, err := tx.Exec(` INSERT INTO raw_nodes (host, port, country, updated_at) VALUES (?, ?, ?, now()) ON CONFLICT (host, port) DO UPDATE SET country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.Host, node.Port, node.Country, ) if err != nil { return merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE:RAW: done") if err != nil { worker.AddError(err) } }() return worker } func startNodesListener(sslDir string, nodesChan chan *Node, rawNodesChan chan []types.TimestampedPeerInfo) utils.Worker { worker := utils.NewSimpleWorker(1) type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } connList := ConnList{limit: 1024} go func() { defer worker.Done() var newConns, peersCount, unexpectedPeersCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { peersCount := atomic.SwapInt64(&peersCount, 0) unexpCount := atomic.SwapInt64(&unexpectedPeersCount, 0) curNewConns := atomic.SwapInt64(&newConns, 0) log.Printf("LISTEN: conns: %d (+%d), peers: +%d, unexp.peers: +%d", atomic.LoadInt64(&connList.length), curNewConns, peersCount, unexpCount) }) connHandler := func(c *network.WSChiaConnection) { atomic.AddInt64(&newConns, 1) connItem := connList.PushConn(c) defer func() { connList.DelItemUnlesRemoved(connItem) c.Close() }() if item := connList.ShiftIfNeed(); item != nil { item.stop <- struct{}{} } shortID := c.PeerIDHex()[0:8] 
logPrint.Trigger() c.SetMessageHandler(func(msgID uint16, msg chiautils.FromBytes) { switch msg := msg.(type) { case *types.RequestPeers: c.SendReply(msgID, types.RespondPeers{PeerList: nil}) case *types.RespondPeers: atomic.AddInt64(&unexpectedPeersCount, int64(len(msg.PeerList))) rawNodesChan <- msg.PeerList case *types.RequestBlock: c.SendReply(msgID, types.RejectBlock{Height: msg.Height}) case *types.NewPeak, *types.NewCompactVDF, *types.NewSignagePointOrEndOfSubSlot, *types.NewUnfinishedBlock, *types.RequestMempoolTransactions, *types.NewTransaction: // do nothing default: log.Printf("LISTEN: %s: unexpected message: %#v", shortID, msg) } }) hs, err := c.PerformHandshake() if err != nil { log.Printf("LISTEN: %s: handshake error: %s", shortID, err) c.Close() return } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) nodesChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; ; i++ { peers, err := c.RequestPeers() if err != nil { log.Printf("LISTEN: %s: peers error: %s", shortID, err) return } timer := time.NewTimer(30 * time.Second) select { case rawNodesChan <- peers.PeerList: atomic.AddInt64(&peersCount, int64(len(peers.PeerList))) timer.Stop() case <-timer.C: log.Printf("LISTEN: %s: timeout adding peers chunk", shortID) } if i%60 == 59 { ip, err := askIP() if err == nil { c.Send(types.RespondPeers{PeerList: []types.TimestampedPeerInfo{ {Host: ip, Port: network.SERVER_PORT, Timestamp: uint64(time.Now().Unix())}, }}) } else { log.Printf("LISTEN: %s: ask IP error: %s", shortID, err) } } logPrint.Trigger() timer = time.NewTimer(5 * time.Minute) select { case <-connItem.stop: timer.Stop() return case <-timer.C: // } } }
if err != nil { worker.AddError(err) } }() return worker } func startSeemsOffUpdater(db *pg.DB) utils.Worker { worker := utils.NewSimpleWorker(1) go func() { defer worker.Done() for { stt := time.Now().UnixNano() _, err := db.Exec(` UPDATE nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF:RAW: error: %s", err) } dur := time.Now().UnixNano() - stt stt = time.Now().UnixNano() _, err = db.Exec(` UPDATE raw_nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF: raw error: %s", err) } durRaw := time.Now().UnixNano() - stt log.Printf("SEEMS_OFF: updated nodes in %d ms, raw in %d ms", dur/1000/1000, durRaw/1000/1000) time.Sleep(30 * time.Minute) } }() return worker } func CMDUpdateNodes() error { sslDir := flag.String("ssl-dir", utils.HomeDirOrEmpty("/.chia/mainnet/ssl"), "path to chia/mainnet/ssl directory") flag.Parse() db := utils.MakePGConnection() gdb, gdb6, err := utils.MakeGeoIPConnection() if err != nil { return merry.Wrap(err) } dbNodeAddrs := make(chan *NodeAddr, 32) rawNodeChunks := make(chan []types.TimestampedPeerInfo, 16) nodesNoLoc := make(chan *Node, 16) rawNodesNoLoc := make(chan *NodeAddr, 16) nodesOut := make(chan *Node, 32) rawNodesOut := make(chan *NodeAddr, 256) workers := []utils.Worker{ // input startOldNodesLoader(db, dbNodeAddrs, 512), startNodesChecker(db, *sslDir, dbNodeAddrs, nodesNoLoc, rawNodeChunks, 256), startNodesListener(*sslDir, nodesNoLoc, rawNodeChunks), // process startRawNodesFilter(db, rawNodeChunks, rawNodesNoLoc), startNodesLocationChecker(gdb, gdb6, nodesNoLoc, nodesOut, rawNodesNoLoc, rawNodesOut, 32), // save startNodesSaver(db, nodesOut, 32), startRawNodesSaver(db, rawNodesOut, 512), // misc startSeemsOffUpdater(db), } logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("UPDATE: chans: (%d) -> (%d, %d 
-> %d) -> (%d, %d)", len(dbNodeAddrs), len(nodesNoLoc), len(rawNodeChunks), len(rawNodesNoLoc), len(nodesOut), len(rawNodesOut)) time.Sleep(10 * time.Second) }) for { for _, worker := range workers { if err := worker.PopError(); err != nil { return err } } logPrint.Trigger() time.Sleep(time.Second) } }
err := network.ListenOn("0.0.0.0", sslDir+"/ca/chia_ca.crt", sslDir+"/ca/chia_ca.key", nil, connHandler)
random_line_split
update.go
package nodes import ( "chiastat/chia/network" "chiastat/chia/types" chiautils "chiastat/chia/utils" "chiastat/utils" "context" "encoding/binary" "encoding/hex" "flag" "io" "log" "net" "net/http" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/abh/geoip" "github.com/ansel1/merry" "github.com/go-pg/pg/v10" pgtypes "github.com/go-pg/pg/v10/types" "github.com/gorilla/websocket" ) func joinHostPort(host string, port uint16) string { return net.JoinHostPort(host, strconv.Itoa(int(port))) } func joinHostPortToHashKey(host string, port uint16) string { var buf []byte if ip := net.ParseIP(host); ip != nil { if ip4 := ip.To4(); ip4 != nil { buf = make([]byte, 4+2) copy(buf, ip4) } else { buf = make([]byte, 16+2) copy(buf, ip) } } else { buf = make([]byte, len(host)+3) //extra zero as separator copy(buf, []byte(host)) } binary.BigEndian.PutUint16(buf[len(buf)-2:], port) return string(buf) } func askIP() (string, error) { resp, err := http.Get("https://checkip.amazonaws.com/") if err != nil { return "", merry.Wrap(err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", merry.Wrap(err) } return strings.TrimSpace(string(body)), nil } type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } type ConnList struct { limit int64 length int64 start *ConnListItem end *ConnListItem mutex sync.Mutex } func (l *ConnList) PushConn(c *network.WSChiaConnection) *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() item := &ConnListItem{prev: l.end, conn: c, stop: make(chan struct{}, 1)} if l.end != nil { l.end.next = item } l.end = item if l.start == nil { l.start = item } l.length += 1 return item } func (l *ConnList) delItemNoLock(item *ConnListItem) { next := item.next prev := item.prev if l.start == item { l.start = next } else { item.prev.next = next } if l.end == item { l.end = prev } else { item.next.prev = prev } item.prev = nil item.next = nil l.length -= 1 } func (l 
*ConnList) DelItemUnlesRemoved(item *ConnListItem) { l.mutex.Lock() defer l.mutex.Unlock() if item.prev == nil && item.next == nil && l.start != item { return } l.delItemNoLock(item) } func (l *ConnList) ShiftIfNeed() *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() if l.length <= l.limit { return nil } if l.start == nil
item := l.start l.delItemNoLock(item) return item } type NodeAddr struct { Host string Port uint16 Country *string } type Node struct { ID []byte Host string Port uint16 ProtocolVersion string SoftwareVersion string NodeType string Country *string } type NodeAddrListAsPGTuple []*NodeAddr func (l NodeAddrListAsPGTuple) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go for i, item := range l { if i > 0 { b = append(b, ',') } b = append(b, '(') b = pgtypes.AppendString(b, item.Host, 1) //quoteFlag=1 b = append(b, ',') b = append(b, []byte(strconv.FormatInt(int64(item.Port), 10))...) b = append(b, ')') } return b, nil } type NodeListAsPGIDs []*Node func (l NodeListAsPGIDs) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go idBuf := make([]byte, 80) for i, item := range l { if i > 0 { b = append(b, ',') } idLen := hex.Encode(idBuf, item.ID) b = append(b, []byte("'\\x")...) b = append(b, idBuf[:idLen]...) b = append(b, '\'') } return b, nil } func startOldNodesLoader(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) rawNodesChunkSize := chunkSize nodesChunkSize := chunkSize / 8 if nodesChunkSize == 0 { nodesChunkSize = 1 } go func() { defer worker.Done() ctx := context.Background() nodes := make([]*Node, nodesChunkSize) rawNodes := make([]*NodeAddr, rawNodesChunkSize) for { nodes := nodes[:0] rawNodes := rawNodes[:0] var getDur, updDur int64 var getDurRaw, updDurRaw int64 err := db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&rawNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? 
FOR NO KEY UPDATE`, chunkSize) getDurRaw = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(rawNodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE raw_nodes SET checked_at = NOW() WHERE (host,port) IN (?)`, NodeAddrListAsPGTuple(rawNodes)) updDurRaw = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } err = db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&nodes, ` SELECT id, host, port FROM nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? FOR NO KEY UPDATE`, chunkSize) getDur = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(nodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE nodes SET checked_at = NOW() WHERE id IN (?)`, NodeListAsPGIDs(nodes)) updDur = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } log.Printf("LOAD: nodes=%d (get:%dms, upd:%dms), raw=%d (get:%dms, upd:%dms)", len(nodes), getDur/1000/1000, updDur/1000/1000, len(rawNodes), getDurRaw/1000/1000, updDurRaw/1000/1000) if len(nodes) == 0 && len(rawNodes) == 0 { time.Sleep(10 * time.Second) } for _, node := range rawNodes { nodesChan <- node } for _, node := range nodes { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} } } }() return worker } func startNodesChecker(db *pg.DB, sslDir string, nodesInChan chan *NodeAddr, nodesOutChan chan *Node, rawNodesOutChan chan []types.TimestampedPeerInfo, concurrency int) utils.Worker { worker := utils.NewSimpleWorker(concurrency) var totalCount int64 = 0 var totalCountOk int64 = 0 var stampCount int64 = 0 stamp := time.Now().UnixNano() logPrint := utils.NewSyncInterval(10*time.Second, func() { curStampCount := 
atomic.SwapInt64(&stampCount, 0) curStamp := atomic.SwapInt64(&stamp, time.Now().UnixNano()) log.Printf("CHECK: nodes checked: %d, ok: %d, rpm: %.2f", atomic.LoadInt64(&totalCount), atomic.LoadInt64(&totalCountOk), float64(curStampCount*60*1000*1000*1000)/float64(time.Now().UnixNano()-curStamp)) }) for i := 0; i < concurrency; i++ { go func() { defer worker.Done() tlsCfg, err := network.MakeTSLConfigFromFiles( sslDir+"/ca/chia_ca.crt", sslDir+"/full_node/public_full_node.crt", sslDir+"/full_node/public_full_node.key") if err != nil { worker.AddError(err) return } handleNode := func(node *NodeAddr) error { cfg := &network.WSChiaConnConfig{Dialer: &websocket.Dialer{ HandshakeTimeout: 5 * time.Second, }} c, err := network.ConnectTo(joinHostPort(node.Host, node.Port), tlsCfg, cfg) if err != nil { return merry.Wrap(err) } hs, err := c.PerformHandshake() if err != nil { return merry.Wrap(err) } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) if len(nodesOutChan) == cap(nodesOutChan) { time.Sleep(time.Millisecond) //throttling } nodesOutChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; i < 3; i++ { peers, err := c.RequestPeers() if err != nil { break } if len(rawNodesOutChan) == cap(rawNodesOutChan) { time.Sleep(time.Millisecond) //throttling } rawNodesOutChan <- peers.PeerList } return nil } for node := range nodesInChan { err := handleNode(node) if err == nil { atomic.AddInt64(&totalCountOk, 1) } atomic.AddInt64(&stampCount, 1) atomic.AddInt64(&totalCount, 1) logPrint.Trigger() } }() } return worker } func startRawNodesFilter(db *pg.DB, nodeChunksChan chan []types.TimestampedPeerInfo, nodesChan chan *NodeAddr) utils.Worker { worker := utils.NewSimpleWorker(2) cleanupInterval := int64(10 * 60) updateInterval := int64(60 * 60) go func() { defer worker.Done() nodeStamps := 
make(map[string]int64) lastCleanupStamp := time.Now().Unix() chunksCount := 0 countUsed := 0 countTotal := 0 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("FILTER: use ratio: %.1f%%, raw nodes in filter: %d", float64(countUsed*100)/float64(countTotal), len(nodeStamps)) countUsed = 0 countTotal = 0 }) prefillNodes := make([]*NodeAddr, 500*1000) _, err := db.Query(&prefillNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND updated_at > now() - ? * INTERVAL '1 second' ORDER BY checked_at ASC NULLS FIRST LIMIT ?`, updateInterval, len(prefillNodes)) if err == nil { now := time.Now().Unix() for i, node := range prefillNodes { nodeStamps[joinHostPortToHashKey(node.Host, node.Port)] = now - updateInterval*int64(i)/int64(len(prefillNodes)) } log.Printf("FILTER: prefilled with %d node(s)", len(nodeStamps)) } else { log.Printf("FILTER: failed to prefill: %s", err) } for chunk := range nodeChunksChan { now := time.Now().Unix() if now-lastCleanupStamp > cleanupInterval { count := 0 for addr, stamp := range nodeStamps { if now-stamp > updateInterval { delete(nodeStamps, addr) count += 1 } } log.Printf("FILTER: raw nodes cleanup: %d removed, %d remaining", count, len(nodeStamps)) lastCleanupStamp = now } for _, node := range chunk { key := joinHostPortToHashKey(node.Host, node.Port) if stamp, ok := nodeStamps[key]; !ok || now-stamp > updateInterval { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} nodeStamps[key] = now countUsed += 1 } } countTotal += len(chunk) chunksCount += 1 logPrint.Trigger() } }() return worker } func tryGetCountry(gdb, gdb6 *geoip.GeoIP, host string, tryResolve bool) *string { hostIP := net.ParseIP(host) if hostIP == nil { if tryResolve { addrs, _ := net.LookupHost(host) ipFound := false for _, addr := range addrs { hostIP = net.ParseIP(addr) if hostIP != nil { host = addr ipFound = true break } } if !ipFound { return nil } } else { return nil } } if hostIP.To4() == nil { if code, _ := 
gdb6.GetCountry_v6(host); code != "" { return &code } } else { if code, _ := gdb.GetCountry(host); code != "" { return &code } } return nil } func startNodesLocationChecker(gdb, gdb6 *geoip.GeoIP, nodesIn, nodesOut chan *Node, rawNodesIn, rawNodesOut chan *NodeAddr, numWorkers int) utils.Worker { worker := utils.NewSimpleWorker(2 * numWorkers) for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range nodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, true) nodesOut <- node } close(nodesOut) }() } for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range rawNodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, false) rawNodesOut <- node } close(rawNodesOut) }() } return worker } func startNodesSaver(db *pg.DB, nodesChan chan *Node, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*Node) _, err := tx.Exec(` INSERT INTO nodes (id, host, port, protocol_version, software_version, node_type, country, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, now()) ON CONFLICT (id) DO UPDATE SET host = EXCLUDED.host, port = EXCLUDED.port, protocol_version = EXCLUDED.protocol_version, software_version = EXCLUDED.software_version, node_type = EXCLUDED.node_type, country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.ID, node.Host, node.Port, node.ProtocolVersion, node.SoftwareVersion, node.NodeType, node.Country, ) if err != nil { return 
merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE: done") if err != nil { worker.AddError(err) } }() return worker } func startRawNodesSaver(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE:RAW: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*NodeAddr) _, err := tx.Exec(` INSERT INTO raw_nodes (host, port, country, updated_at) VALUES (?, ?, ?, now()) ON CONFLICT (host, port) DO UPDATE SET country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.Host, node.Port, node.Country, ) if err != nil { return merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE:RAW: done") if err != nil { worker.AddError(err) } }() return worker } func startNodesListener(sslDir string, nodesChan chan *Node, rawNodesChan chan []types.TimestampedPeerInfo) utils.Worker { worker := utils.NewSimpleWorker(1) type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } connList := ConnList{limit: 1024} go func() { defer worker.Done() var newConns, peersCount, unexpectedPeersCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { peersCount := atomic.SwapInt64(&peersCount, 0) unexpCount := 
atomic.SwapInt64(&unexpectedPeersCount, 0) curNewConns := atomic.SwapInt64(&newConns, 0) log.Printf("LISTEN: conns: %d (+%d), peers: +%d, unexp.peers: +%d", atomic.LoadInt64(&connList.length), curNewConns, peersCount, unexpCount) }) connHandler := func(c *network.WSChiaConnection) { atomic.AddInt64(&newConns, 1) connItem := connList.PushConn(c) defer func() { connList.DelItemUnlesRemoved(connItem) c.Close() }() if item := connList.ShiftIfNeed(); item != nil { item.stop <- struct{}{} } shortID := c.PeerIDHex()[0:8] logPrint.Trigger() c.SetMessageHandler(func(msgID uint16, msg chiautils.FromBytes) { switch msg := msg.(type) { case *types.RequestPeers: c.SendReply(msgID, types.RespondPeers{PeerList: nil}) case *types.RespondPeers: atomic.AddInt64(&unexpectedPeersCount, int64(len(msg.PeerList))) rawNodesChan <- msg.PeerList case *types.RequestBlock: c.SendReply(msgID, types.RejectBlock{Height: msg.Height}) case *types.NewPeak, *types.NewCompactVDF, *types.NewSignagePointOrEndOfSubSlot, *types.NewUnfinishedBlock, *types.RequestMempoolTransactions, *types.NewTransaction: // do nothing default: log.Printf("LISTEN: %s: unexpected message: %#v", shortID, msg) } }) hs, err := c.PerformHandshake() if err != nil { log.Printf("LISTEN: %s: handshake error: %s", shortID, err) c.Close() return } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) nodesChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; ; i++ { peers, err := c.RequestPeers() if err != nil { log.Printf("LISTEN: %s: peers error: %s", shortID, err) return } timer := time.NewTimer(30 * time.Second) select { case rawNodesChan <- peers.PeerList: atomic.AddInt64(&peersCount, int64(len(peers.PeerList))) timer.Stop() case <-timer.C: log.Printf("LISTEN: %s: timeout adding peers chunk", shortID) } if i%60 == 59 { ip, err := askIP() if err == nil { 
c.Send(types.RespondPeers{PeerList: []types.TimestampedPeerInfo{ {Host: ip, Port: network.SERVER_PORT, Timestamp: uint64(time.Now().Unix())}, }}) } else { log.Printf("LISTEN: %s: ask IP error: %s", shortID, err) } } logPrint.Trigger() timer = time.NewTimer(5 * time.Minute) select { case <-connItem.stop: timer.Stop() return case <-timer.C: // } } } err := network.ListenOn("0.0.0.0", sslDir+"/ca/chia_ca.crt", sslDir+"/ca/chia_ca.key", nil, connHandler) if err != nil { worker.AddError(err) } }() return worker } func startSeemsOffUpdater(db *pg.DB) utils.Worker { worker := utils.NewSimpleWorker(1) go func() { defer worker.Done() for { stt := time.Now().UnixNano() _, err := db.Exec(` UPDATE nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF:RAW: error: %s", err) } dur := time.Now().UnixNano() - stt stt = time.Now().UnixNano() _, err = db.Exec(` UPDATE raw_nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF: raw error: %s", err) } durRaw := time.Now().UnixNano() - stt log.Printf("SEEMS_OFF: updated nodes in %d ms, raw in %d ms", dur/1000/1000, durRaw/1000/1000) time.Sleep(30 * time.Minute) } }() return worker } func CMDUpdateNodes() error { sslDir := flag.String("ssl-dir", utils.HomeDirOrEmpty("/.chia/mainnet/ssl"), "path to chia/mainnet/ssl directory") flag.Parse() db := utils.MakePGConnection() gdb, gdb6, err := utils.MakeGeoIPConnection() if err != nil { return merry.Wrap(err) } dbNodeAddrs := make(chan *NodeAddr, 32) rawNodeChunks := make(chan []types.TimestampedPeerInfo, 16) nodesNoLoc := make(chan *Node, 16) rawNodesNoLoc := make(chan *NodeAddr, 16) nodesOut := make(chan *Node, 32) rawNodesOut := make(chan *NodeAddr, 256) workers := []utils.Worker{ // input startOldNodesLoader(db, dbNodeAddrs, 512), startNodesChecker(db, *sslDir, dbNodeAddrs, nodesNoLoc, 
rawNodeChunks, 256), startNodesListener(*sslDir, nodesNoLoc, rawNodeChunks), // process startRawNodesFilter(db, rawNodeChunks, rawNodesNoLoc), startNodesLocationChecker(gdb, gdb6, nodesNoLoc, nodesOut, rawNodesNoLoc, rawNodesOut, 32), // save startNodesSaver(db, nodesOut, 32), startRawNodesSaver(db, rawNodesOut, 512), // misc startSeemsOffUpdater(db), } logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("UPDATE: chans: (%d) -> (%d, %d -> %d) -> (%d, %d)", len(dbNodeAddrs), len(nodesNoLoc), len(rawNodeChunks), len(rawNodesNoLoc), len(nodesOut), len(rawNodesOut)) time.Sleep(10 * time.Second) }) for { for _, worker := range workers { if err := worker.PopError(); err != nil { return err } } logPrint.Trigger() time.Sleep(time.Second) } }
{ return nil }
conditional_block
update.go
package nodes import ( "chiastat/chia/network" "chiastat/chia/types" chiautils "chiastat/chia/utils" "chiastat/utils" "context" "encoding/binary" "encoding/hex" "flag" "io" "log" "net" "net/http" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/abh/geoip" "github.com/ansel1/merry" "github.com/go-pg/pg/v10" pgtypes "github.com/go-pg/pg/v10/types" "github.com/gorilla/websocket" ) func joinHostPort(host string, port uint16) string { return net.JoinHostPort(host, strconv.Itoa(int(port))) } func joinHostPortToHashKey(host string, port uint16) string { var buf []byte if ip := net.ParseIP(host); ip != nil { if ip4 := ip.To4(); ip4 != nil { buf = make([]byte, 4+2) copy(buf, ip4) } else { buf = make([]byte, 16+2) copy(buf, ip) } } else { buf = make([]byte, len(host)+3) //extra zero as separator copy(buf, []byte(host)) } binary.BigEndian.PutUint16(buf[len(buf)-2:], port) return string(buf) } func askIP() (string, error) { resp, err := http.Get("https://checkip.amazonaws.com/") if err != nil { return "", merry.Wrap(err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", merry.Wrap(err) } return strings.TrimSpace(string(body)), nil } type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } type ConnList struct { limit int64 length int64 start *ConnListItem end *ConnListItem mutex sync.Mutex } func (l *ConnList) PushConn(c *network.WSChiaConnection) *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() item := &ConnListItem{prev: l.end, conn: c, stop: make(chan struct{}, 1)} if l.end != nil { l.end.next = item } l.end = item if l.start == nil { l.start = item } l.length += 1 return item } func (l *ConnList) delItemNoLock(item *ConnListItem) { next := item.next prev := item.prev if l.start == item { l.start = next } else { item.prev.next = next } if l.end == item { l.end = prev } else { item.next.prev = prev } item.prev = nil item.next = nil l.length -= 1 } func (l 
*ConnList) DelItemUnlesRemoved(item *ConnListItem) { l.mutex.Lock() defer l.mutex.Unlock() if item.prev == nil && item.next == nil && l.start != item { return } l.delItemNoLock(item) } func (l *ConnList) ShiftIfNeed() *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() if l.length <= l.limit { return nil } if l.start == nil { return nil } item := l.start l.delItemNoLock(item) return item } type NodeAddr struct { Host string Port uint16 Country *string } type Node struct { ID []byte Host string Port uint16 ProtocolVersion string SoftwareVersion string NodeType string Country *string } type NodeAddrListAsPGTuple []*NodeAddr func (l NodeAddrListAsPGTuple) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go for i, item := range l { if i > 0 { b = append(b, ',') } b = append(b, '(') b = pgtypes.AppendString(b, item.Host, 1) //quoteFlag=1 b = append(b, ',') b = append(b, []byte(strconv.FormatInt(int64(item.Port), 10))...) b = append(b, ')') } return b, nil } type NodeListAsPGIDs []*Node func (l NodeListAsPGIDs) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go idBuf := make([]byte, 80) for i, item := range l { if i > 0 { b = append(b, ',') } idLen := hex.Encode(idBuf, item.ID) b = append(b, []byte("'\\x")...) b = append(b, idBuf[:idLen]...) 
b = append(b, '\'') } return b, nil } func startOldNodesLoader(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) rawNodesChunkSize := chunkSize nodesChunkSize := chunkSize / 8 if nodesChunkSize == 0 { nodesChunkSize = 1 } go func() { defer worker.Done() ctx := context.Background() nodes := make([]*Node, nodesChunkSize) rawNodes := make([]*NodeAddr, rawNodesChunkSize) for { nodes := nodes[:0] rawNodes := rawNodes[:0] var getDur, updDur int64 var getDurRaw, updDurRaw int64 err := db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&rawNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? FOR NO KEY UPDATE`, chunkSize) getDurRaw = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(rawNodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE raw_nodes SET checked_at = NOW() WHERE (host,port) IN (?)`, NodeAddrListAsPGTuple(rawNodes)) updDurRaw = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } err = db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&nodes, ` SELECT id, host, port FROM nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? 
FOR NO KEY UPDATE`, chunkSize) getDur = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(nodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE nodes SET checked_at = NOW() WHERE id IN (?)`, NodeListAsPGIDs(nodes)) updDur = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } log.Printf("LOAD: nodes=%d (get:%dms, upd:%dms), raw=%d (get:%dms, upd:%dms)", len(nodes), getDur/1000/1000, updDur/1000/1000, len(rawNodes), getDurRaw/1000/1000, updDurRaw/1000/1000) if len(nodes) == 0 && len(rawNodes) == 0 { time.Sleep(10 * time.Second) } for _, node := range rawNodes { nodesChan <- node } for _, node := range nodes { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} } } }() return worker } func startNodesChecker(db *pg.DB, sslDir string, nodesInChan chan *NodeAddr, nodesOutChan chan *Node, rawNodesOutChan chan []types.TimestampedPeerInfo, concurrency int) utils.Worker { worker := utils.NewSimpleWorker(concurrency) var totalCount int64 = 0 var totalCountOk int64 = 0 var stampCount int64 = 0 stamp := time.Now().UnixNano() logPrint := utils.NewSyncInterval(10*time.Second, func() { curStampCount := atomic.SwapInt64(&stampCount, 0) curStamp := atomic.SwapInt64(&stamp, time.Now().UnixNano()) log.Printf("CHECK: nodes checked: %d, ok: %d, rpm: %.2f", atomic.LoadInt64(&totalCount), atomic.LoadInt64(&totalCountOk), float64(curStampCount*60*1000*1000*1000)/float64(time.Now().UnixNano()-curStamp)) }) for i := 0; i < concurrency; i++ { go func() { defer worker.Done() tlsCfg, err := network.MakeTSLConfigFromFiles( sslDir+"/ca/chia_ca.crt", sslDir+"/full_node/public_full_node.crt", sslDir+"/full_node/public_full_node.key") if err != nil { worker.AddError(err) return } handleNode := func(node *NodeAddr) error { cfg := &network.WSChiaConnConfig{Dialer: &websocket.Dialer{ HandshakeTimeout: 5 * time.Second, }} c, err := 
network.ConnectTo(joinHostPort(node.Host, node.Port), tlsCfg, cfg) if err != nil { return merry.Wrap(err) } hs, err := c.PerformHandshake() if err != nil { return merry.Wrap(err) } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) if len(nodesOutChan) == cap(nodesOutChan) { time.Sleep(time.Millisecond) //throttling } nodesOutChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; i < 3; i++ { peers, err := c.RequestPeers() if err != nil { break } if len(rawNodesOutChan) == cap(rawNodesOutChan) { time.Sleep(time.Millisecond) //throttling } rawNodesOutChan <- peers.PeerList } return nil } for node := range nodesInChan { err := handleNode(node) if err == nil { atomic.AddInt64(&totalCountOk, 1) } atomic.AddInt64(&stampCount, 1) atomic.AddInt64(&totalCount, 1) logPrint.Trigger() } }() } return worker } func startRawNodesFilter(db *pg.DB, nodeChunksChan chan []types.TimestampedPeerInfo, nodesChan chan *NodeAddr) utils.Worker { worker := utils.NewSimpleWorker(2) cleanupInterval := int64(10 * 60) updateInterval := int64(60 * 60) go func() { defer worker.Done() nodeStamps := make(map[string]int64) lastCleanupStamp := time.Now().Unix() chunksCount := 0 countUsed := 0 countTotal := 0 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("FILTER: use ratio: %.1f%%, raw nodes in filter: %d", float64(countUsed*100)/float64(countTotal), len(nodeStamps)) countUsed = 0 countTotal = 0 }) prefillNodes := make([]*NodeAddr, 500*1000) _, err := db.Query(&prefillNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND updated_at > now() - ? 
* INTERVAL '1 second' ORDER BY checked_at ASC NULLS FIRST LIMIT ?`, updateInterval, len(prefillNodes)) if err == nil { now := time.Now().Unix() for i, node := range prefillNodes { nodeStamps[joinHostPortToHashKey(node.Host, node.Port)] = now - updateInterval*int64(i)/int64(len(prefillNodes)) } log.Printf("FILTER: prefilled with %d node(s)", len(nodeStamps)) } else { log.Printf("FILTER: failed to prefill: %s", err) } for chunk := range nodeChunksChan { now := time.Now().Unix() if now-lastCleanupStamp > cleanupInterval { count := 0 for addr, stamp := range nodeStamps { if now-stamp > updateInterval { delete(nodeStamps, addr) count += 1 } } log.Printf("FILTER: raw nodes cleanup: %d removed, %d remaining", count, len(nodeStamps)) lastCleanupStamp = now } for _, node := range chunk { key := joinHostPortToHashKey(node.Host, node.Port) if stamp, ok := nodeStamps[key]; !ok || now-stamp > updateInterval { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} nodeStamps[key] = now countUsed += 1 } } countTotal += len(chunk) chunksCount += 1 logPrint.Trigger() } }() return worker } func tryGetCountry(gdb, gdb6 *geoip.GeoIP, host string, tryResolve bool) *string { hostIP := net.ParseIP(host) if hostIP == nil { if tryResolve { addrs, _ := net.LookupHost(host) ipFound := false for _, addr := range addrs { hostIP = net.ParseIP(addr) if hostIP != nil { host = addr ipFound = true break } } if !ipFound { return nil } } else { return nil } } if hostIP.To4() == nil { if code, _ := gdb6.GetCountry_v6(host); code != "" { return &code } } else { if code, _ := gdb.GetCountry(host); code != "" { return &code } } return nil } func startNodesLocationChecker(gdb, gdb6 *geoip.GeoIP, nodesIn, nodesOut chan *Node, rawNodesIn, rawNodesOut chan *NodeAddr, numWorkers int) utils.Worker
func startNodesSaver(db *pg.DB, nodesChan chan *Node, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*Node) _, err := tx.Exec(` INSERT INTO nodes (id, host, port, protocol_version, software_version, node_type, country, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, now()) ON CONFLICT (id) DO UPDATE SET host = EXCLUDED.host, port = EXCLUDED.port, protocol_version = EXCLUDED.protocol_version, software_version = EXCLUDED.software_version, node_type = EXCLUDED.node_type, country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.ID, node.Host, node.Port, node.ProtocolVersion, node.SoftwareVersion, node.NodeType, node.Country, ) if err != nil { return merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE: done") if err != nil { worker.AddError(err) } }() return worker } func startRawNodesSaver(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE:RAW: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 
0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*NodeAddr) _, err := tx.Exec(` INSERT INTO raw_nodes (host, port, country, updated_at) VALUES (?, ?, ?, now()) ON CONFLICT (host, port) DO UPDATE SET country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.Host, node.Port, node.Country, ) if err != nil { return merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE:RAW: done") if err != nil { worker.AddError(err) } }() return worker } func startNodesListener(sslDir string, nodesChan chan *Node, rawNodesChan chan []types.TimestampedPeerInfo) utils.Worker { worker := utils.NewSimpleWorker(1) type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } connList := ConnList{limit: 1024} go func() { defer worker.Done() var newConns, peersCount, unexpectedPeersCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { peersCount := atomic.SwapInt64(&peersCount, 0) unexpCount := atomic.SwapInt64(&unexpectedPeersCount, 0) curNewConns := atomic.SwapInt64(&newConns, 0) log.Printf("LISTEN: conns: %d (+%d), peers: +%d, unexp.peers: +%d", atomic.LoadInt64(&connList.length), curNewConns, peersCount, unexpCount) }) connHandler := func(c *network.WSChiaConnection) { atomic.AddInt64(&newConns, 1) connItem := connList.PushConn(c) defer func() { connList.DelItemUnlesRemoved(connItem) c.Close() }() if item := connList.ShiftIfNeed(); item != nil { item.stop <- struct{}{} } shortID := c.PeerIDHex()[0:8] logPrint.Trigger() c.SetMessageHandler(func(msgID uint16, msg chiautils.FromBytes) { switch msg := msg.(type) { case *types.RequestPeers: c.SendReply(msgID, types.RespondPeers{PeerList: nil}) case *types.RespondPeers: atomic.AddInt64(&unexpectedPeersCount, 
int64(len(msg.PeerList))) rawNodesChan <- msg.PeerList case *types.RequestBlock: c.SendReply(msgID, types.RejectBlock{Height: msg.Height}) case *types.NewPeak, *types.NewCompactVDF, *types.NewSignagePointOrEndOfSubSlot, *types.NewUnfinishedBlock, *types.RequestMempoolTransactions, *types.NewTransaction: // do nothing default: log.Printf("LISTEN: %s: unexpected message: %#v", shortID, msg) } }) hs, err := c.PerformHandshake() if err != nil { log.Printf("LISTEN: %s: handshake error: %s", shortID, err) c.Close() return } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) nodesChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; ; i++ { peers, err := c.RequestPeers() if err != nil { log.Printf("LISTEN: %s: peers error: %s", shortID, err) return } timer := time.NewTimer(30 * time.Second) select { case rawNodesChan <- peers.PeerList: atomic.AddInt64(&peersCount, int64(len(peers.PeerList))) timer.Stop() case <-timer.C: log.Printf("LISTEN: %s: timeout adding peers chunk", shortID) } if i%60 == 59 { ip, err := askIP() if err == nil { c.Send(types.RespondPeers{PeerList: []types.TimestampedPeerInfo{ {Host: ip, Port: network.SERVER_PORT, Timestamp: uint64(time.Now().Unix())}, }}) } else { log.Printf("LISTEN: %s: ask IP error: %s", shortID, err) } } logPrint.Trigger() timer = time.NewTimer(5 * time.Minute) select { case <-connItem.stop: timer.Stop() return case <-timer.C: // } } } err := network.ListenOn("0.0.0.0", sslDir+"/ca/chia_ca.crt", sslDir+"/ca/chia_ca.key", nil, connHandler) if err != nil { worker.AddError(err) } }() return worker } func startSeemsOffUpdater(db *pg.DB) utils.Worker { worker := utils.NewSimpleWorker(1) go func() { defer worker.Done() for { stt := time.Now().UnixNano() _, err := db.Exec(` UPDATE nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND 
updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF:RAW: error: %s", err) } dur := time.Now().UnixNano() - stt stt = time.Now().UnixNano() _, err = db.Exec(` UPDATE raw_nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF: raw error: %s", err) } durRaw := time.Now().UnixNano() - stt log.Printf("SEEMS_OFF: updated nodes in %d ms, raw in %d ms", dur/1000/1000, durRaw/1000/1000) time.Sleep(30 * time.Minute) } }() return worker } func CMDUpdateNodes() error { sslDir := flag.String("ssl-dir", utils.HomeDirOrEmpty("/.chia/mainnet/ssl"), "path to chia/mainnet/ssl directory") flag.Parse() db := utils.MakePGConnection() gdb, gdb6, err := utils.MakeGeoIPConnection() if err != nil { return merry.Wrap(err) } dbNodeAddrs := make(chan *NodeAddr, 32) rawNodeChunks := make(chan []types.TimestampedPeerInfo, 16) nodesNoLoc := make(chan *Node, 16) rawNodesNoLoc := make(chan *NodeAddr, 16) nodesOut := make(chan *Node, 32) rawNodesOut := make(chan *NodeAddr, 256) workers := []utils.Worker{ // input startOldNodesLoader(db, dbNodeAddrs, 512), startNodesChecker(db, *sslDir, dbNodeAddrs, nodesNoLoc, rawNodeChunks, 256), startNodesListener(*sslDir, nodesNoLoc, rawNodeChunks), // process startRawNodesFilter(db, rawNodeChunks, rawNodesNoLoc), startNodesLocationChecker(gdb, gdb6, nodesNoLoc, nodesOut, rawNodesNoLoc, rawNodesOut, 32), // save startNodesSaver(db, nodesOut, 32), startRawNodesSaver(db, rawNodesOut, 512), // misc startSeemsOffUpdater(db), } logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("UPDATE: chans: (%d) -> (%d, %d -> %d) -> (%d, %d)", len(dbNodeAddrs), len(nodesNoLoc), len(rawNodeChunks), len(rawNodesNoLoc), len(nodesOut), len(rawNodesOut)) time.Sleep(10 * time.Second) }) for { for _, worker := range workers { if err := worker.PopError(); err != nil { return err } } logPrint.Trigger() time.Sleep(time.Second) } }
{ worker := utils.NewSimpleWorker(2 * numWorkers) for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range nodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, true) nodesOut <- node } close(nodesOut) }() } for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range rawNodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, false) rawNodesOut <- node } close(rawNodesOut) }() } return worker }
identifier_body
update.go
package nodes import ( "chiastat/chia/network" "chiastat/chia/types" chiautils "chiastat/chia/utils" "chiastat/utils" "context" "encoding/binary" "encoding/hex" "flag" "io" "log" "net" "net/http" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/abh/geoip" "github.com/ansel1/merry" "github.com/go-pg/pg/v10" pgtypes "github.com/go-pg/pg/v10/types" "github.com/gorilla/websocket" ) func joinHostPort(host string, port uint16) string { return net.JoinHostPort(host, strconv.Itoa(int(port))) } func joinHostPortToHashKey(host string, port uint16) string { var buf []byte if ip := net.ParseIP(host); ip != nil { if ip4 := ip.To4(); ip4 != nil { buf = make([]byte, 4+2) copy(buf, ip4) } else { buf = make([]byte, 16+2) copy(buf, ip) } } else { buf = make([]byte, len(host)+3) //extra zero as separator copy(buf, []byte(host)) } binary.BigEndian.PutUint16(buf[len(buf)-2:], port) return string(buf) } func
() (string, error) { resp, err := http.Get("https://checkip.amazonaws.com/") if err != nil { return "", merry.Wrap(err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", merry.Wrap(err) } return strings.TrimSpace(string(body)), nil } type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } type ConnList struct { limit int64 length int64 start *ConnListItem end *ConnListItem mutex sync.Mutex } func (l *ConnList) PushConn(c *network.WSChiaConnection) *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() item := &ConnListItem{prev: l.end, conn: c, stop: make(chan struct{}, 1)} if l.end != nil { l.end.next = item } l.end = item if l.start == nil { l.start = item } l.length += 1 return item } func (l *ConnList) delItemNoLock(item *ConnListItem) { next := item.next prev := item.prev if l.start == item { l.start = next } else { item.prev.next = next } if l.end == item { l.end = prev } else { item.next.prev = prev } item.prev = nil item.next = nil l.length -= 1 } func (l *ConnList) DelItemUnlesRemoved(item *ConnListItem) { l.mutex.Lock() defer l.mutex.Unlock() if item.prev == nil && item.next == nil && l.start != item { return } l.delItemNoLock(item) } func (l *ConnList) ShiftIfNeed() *ConnListItem { l.mutex.Lock() defer l.mutex.Unlock() if l.length <= l.limit { return nil } if l.start == nil { return nil } item := l.start l.delItemNoLock(item) return item } type NodeAddr struct { Host string Port uint16 Country *string } type Node struct { ID []byte Host string Port uint16 ProtocolVersion string SoftwareVersion string NodeType string Country *string } type NodeAddrListAsPGTuple []*NodeAddr func (l NodeAddrListAsPGTuple) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go for i, item := range l { if i > 0 { b = append(b, ',') } b = append(b, '(') b = pgtypes.AppendString(b, 
item.Host, 1) //quoteFlag=1 b = append(b, ',') b = append(b, []byte(strconv.FormatInt(int64(item.Port), 10))...) b = append(b, ')') } return b, nil } type NodeListAsPGIDs []*Node func (l NodeListAsPGIDs) AppendValue(b []byte, flags int) ([]byte, error) { // flags: https://github.com/go-pg/pg/blob/c9ee578a38d6866649072df18a3dbb36ff369747/types/flags.go idBuf := make([]byte, 80) for i, item := range l { if i > 0 { b = append(b, ',') } idLen := hex.Encode(idBuf, item.ID) b = append(b, []byte("'\\x")...) b = append(b, idBuf[:idLen]...) b = append(b, '\'') } return b, nil } func startOldNodesLoader(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) rawNodesChunkSize := chunkSize nodesChunkSize := chunkSize / 8 if nodesChunkSize == 0 { nodesChunkSize = 1 } go func() { defer worker.Done() ctx := context.Background() nodes := make([]*Node, nodesChunkSize) rawNodes := make([]*NodeAddr, rawNodesChunkSize) for { nodes := nodes[:0] rawNodes := rawNodes[:0] var getDur, updDur int64 var getDurRaw, updDurRaw int64 err := db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&rawNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? 
FOR NO KEY UPDATE`, chunkSize) getDurRaw = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(rawNodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE raw_nodes SET checked_at = NOW() WHERE (host,port) IN (?)`, NodeAddrListAsPGTuple(rawNodes)) updDurRaw = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } err = db.RunInTransaction(ctx, func(tx *pg.Tx) error { stt := time.Now().UnixNano() _, err := tx.Query(&nodes, ` SELECT id, host, port FROM nodes WHERE NOT seems_off AND (checked_at IS NULL OR checked_at < now() - INTERVAL '5 hours') ORDER BY checked_at ASC NULLS FIRST LIMIT ? FOR NO KEY UPDATE`, chunkSize) getDur = time.Now().UnixNano() - stt if utils.IsPGDeadlock(err) { return nil } if err != nil { return merry.Wrap(err) } if len(nodes) == 0 { return nil } stt = time.Now().UnixNano() _, err = tx.Exec(` UPDATE nodes SET checked_at = NOW() WHERE id IN (?)`, NodeListAsPGIDs(nodes)) updDur = time.Now().UnixNano() - stt return merry.Wrap(err) }) if err != nil { worker.AddError(err) return } log.Printf("LOAD: nodes=%d (get:%dms, upd:%dms), raw=%d (get:%dms, upd:%dms)", len(nodes), getDur/1000/1000, updDur/1000/1000, len(rawNodes), getDurRaw/1000/1000, updDurRaw/1000/1000) if len(nodes) == 0 && len(rawNodes) == 0 { time.Sleep(10 * time.Second) } for _, node := range rawNodes { nodesChan <- node } for _, node := range nodes { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} } } }() return worker } func startNodesChecker(db *pg.DB, sslDir string, nodesInChan chan *NodeAddr, nodesOutChan chan *Node, rawNodesOutChan chan []types.TimestampedPeerInfo, concurrency int) utils.Worker { worker := utils.NewSimpleWorker(concurrency) var totalCount int64 = 0 var totalCountOk int64 = 0 var stampCount int64 = 0 stamp := time.Now().UnixNano() logPrint := utils.NewSyncInterval(10*time.Second, func() { curStampCount := 
atomic.SwapInt64(&stampCount, 0) curStamp := atomic.SwapInt64(&stamp, time.Now().UnixNano()) log.Printf("CHECK: nodes checked: %d, ok: %d, rpm: %.2f", atomic.LoadInt64(&totalCount), atomic.LoadInt64(&totalCountOk), float64(curStampCount*60*1000*1000*1000)/float64(time.Now().UnixNano()-curStamp)) }) for i := 0; i < concurrency; i++ { go func() { defer worker.Done() tlsCfg, err := network.MakeTSLConfigFromFiles( sslDir+"/ca/chia_ca.crt", sslDir+"/full_node/public_full_node.crt", sslDir+"/full_node/public_full_node.key") if err != nil { worker.AddError(err) return } handleNode := func(node *NodeAddr) error { cfg := &network.WSChiaConnConfig{Dialer: &websocket.Dialer{ HandshakeTimeout: 5 * time.Second, }} c, err := network.ConnectTo(joinHostPort(node.Host, node.Port), tlsCfg, cfg) if err != nil { return merry.Wrap(err) } hs, err := c.PerformHandshake() if err != nil { return merry.Wrap(err) } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) if len(nodesOutChan) == cap(nodesOutChan) { time.Sleep(time.Millisecond) //throttling } nodesOutChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; i < 3; i++ { peers, err := c.RequestPeers() if err != nil { break } if len(rawNodesOutChan) == cap(rawNodesOutChan) { time.Sleep(time.Millisecond) //throttling } rawNodesOutChan <- peers.PeerList } return nil } for node := range nodesInChan { err := handleNode(node) if err == nil { atomic.AddInt64(&totalCountOk, 1) } atomic.AddInt64(&stampCount, 1) atomic.AddInt64(&totalCount, 1) logPrint.Trigger() } }() } return worker } func startRawNodesFilter(db *pg.DB, nodeChunksChan chan []types.TimestampedPeerInfo, nodesChan chan *NodeAddr) utils.Worker { worker := utils.NewSimpleWorker(2) cleanupInterval := int64(10 * 60) updateInterval := int64(60 * 60) go func() { defer worker.Done() nodeStamps := 
make(map[string]int64) lastCleanupStamp := time.Now().Unix() chunksCount := 0 countUsed := 0 countTotal := 0 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("FILTER: use ratio: %.1f%%, raw nodes in filter: %d", float64(countUsed*100)/float64(countTotal), len(nodeStamps)) countUsed = 0 countTotal = 0 }) prefillNodes := make([]*NodeAddr, 500*1000) _, err := db.Query(&prefillNodes, ` SELECT host, port FROM raw_nodes WHERE NOT seems_off AND updated_at > now() - ? * INTERVAL '1 second' ORDER BY checked_at ASC NULLS FIRST LIMIT ?`, updateInterval, len(prefillNodes)) if err == nil { now := time.Now().Unix() for i, node := range prefillNodes { nodeStamps[joinHostPortToHashKey(node.Host, node.Port)] = now - updateInterval*int64(i)/int64(len(prefillNodes)) } log.Printf("FILTER: prefilled with %d node(s)", len(nodeStamps)) } else { log.Printf("FILTER: failed to prefill: %s", err) } for chunk := range nodeChunksChan { now := time.Now().Unix() if now-lastCleanupStamp > cleanupInterval { count := 0 for addr, stamp := range nodeStamps { if now-stamp > updateInterval { delete(nodeStamps, addr) count += 1 } } log.Printf("FILTER: raw nodes cleanup: %d removed, %d remaining", count, len(nodeStamps)) lastCleanupStamp = now } for _, node := range chunk { key := joinHostPortToHashKey(node.Host, node.Port) if stamp, ok := nodeStamps[key]; !ok || now-stamp > updateInterval { nodesChan <- &NodeAddr{Host: node.Host, Port: node.Port} nodeStamps[key] = now countUsed += 1 } } countTotal += len(chunk) chunksCount += 1 logPrint.Trigger() } }() return worker } func tryGetCountry(gdb, gdb6 *geoip.GeoIP, host string, tryResolve bool) *string { hostIP := net.ParseIP(host) if hostIP == nil { if tryResolve { addrs, _ := net.LookupHost(host) ipFound := false for _, addr := range addrs { hostIP = net.ParseIP(addr) if hostIP != nil { host = addr ipFound = true break } } if !ipFound { return nil } } else { return nil } } if hostIP.To4() == nil { if code, _ := 
gdb6.GetCountry_v6(host); code != "" { return &code } } else { if code, _ := gdb.GetCountry(host); code != "" { return &code } } return nil } func startNodesLocationChecker(gdb, gdb6 *geoip.GeoIP, nodesIn, nodesOut chan *Node, rawNodesIn, rawNodesOut chan *NodeAddr, numWorkers int) utils.Worker { worker := utils.NewSimpleWorker(2 * numWorkers) for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range nodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, true) nodesOut <- node } close(nodesOut) }() } for i := 0; i < numWorkers; i++ { go func() { defer worker.Done() for node := range rawNodesIn { node.Country = tryGetCountry(gdb, gdb6, node.Host, false) rawNodesOut <- node } close(rawNodesOut) }() } return worker } func startNodesSaver(db *pg.DB, nodesChan chan *Node, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*Node) _, err := tx.Exec(` INSERT INTO nodes (id, host, port, protocol_version, software_version, node_type, country, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, now()) ON CONFLICT (id) DO UPDATE SET host = EXCLUDED.host, port = EXCLUDED.port, protocol_version = EXCLUDED.protocol_version, software_version = EXCLUDED.software_version, node_type = EXCLUDED.node_type, country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.ID, node.Host, node.Port, node.ProtocolVersion, node.SoftwareVersion, node.NodeType, node.Country, ) if err != nil { return 
merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE: done") if err != nil { worker.AddError(err) } }() return worker } func startRawNodesSaver(db *pg.DB, nodesChan chan *NodeAddr, chunkSize int) utils.Worker { worker := utils.NewSimpleWorker(1) nodesChanI := make(chan interface{}, 16) count := 0 go func() { for node := range nodesChan { nodesChanI <- node } close(nodesChanI) }() go func() { defer worker.Done() var savesDurSum, savesDurCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("SAVE:RAW: count: +%d (%d chunks, avg %d ms)", count, savesDurCount, savesDurSum/savesDurCount) savesDurSum = 0 savesDurCount = 0 count = 0 }) err := utils.SaveChunked(db, chunkSize, nodesChanI, func(tx *pg.Tx, items []interface{}) error { for _, nodeI := range items { node := nodeI.(*NodeAddr) _, err := tx.Exec(` INSERT INTO raw_nodes (host, port, country, updated_at) VALUES (?, ?, ?, now()) ON CONFLICT (host, port) DO UPDATE SET country = EXCLUDED.country, seems_off = false, updated_at = now()`, node.Host, node.Port, node.Country, ) if err != nil { return merry.Wrap(err) } count += 1 } return nil }, func(saveDur time.Duration) { savesDurSum += int64(saveDur / time.Millisecond) savesDurCount += 1 logPrint.Trigger() }) log.Println("SAVE:RAW: done") if err != nil { worker.AddError(err) } }() return worker } func startNodesListener(sslDir string, nodesChan chan *Node, rawNodesChan chan []types.TimestampedPeerInfo) utils.Worker { worker := utils.NewSimpleWorker(1) type ConnListItem struct { prev *ConnListItem next *ConnListItem conn *network.WSChiaConnection stop chan struct{} } connList := ConnList{limit: 1024} go func() { defer worker.Done() var newConns, peersCount, unexpectedPeersCount int64 logPrint := utils.NewSyncInterval(10*time.Second, func() { peersCount := atomic.SwapInt64(&peersCount, 0) unexpCount := 
atomic.SwapInt64(&unexpectedPeersCount, 0) curNewConns := atomic.SwapInt64(&newConns, 0) log.Printf("LISTEN: conns: %d (+%d), peers: +%d, unexp.peers: +%d", atomic.LoadInt64(&connList.length), curNewConns, peersCount, unexpCount) }) connHandler := func(c *network.WSChiaConnection) { atomic.AddInt64(&newConns, 1) connItem := connList.PushConn(c) defer func() { connList.DelItemUnlesRemoved(connItem) c.Close() }() if item := connList.ShiftIfNeed(); item != nil { item.stop <- struct{}{} } shortID := c.PeerIDHex()[0:8] logPrint.Trigger() c.SetMessageHandler(func(msgID uint16, msg chiautils.FromBytes) { switch msg := msg.(type) { case *types.RequestPeers: c.SendReply(msgID, types.RespondPeers{PeerList: nil}) case *types.RespondPeers: atomic.AddInt64(&unexpectedPeersCount, int64(len(msg.PeerList))) rawNodesChan <- msg.PeerList case *types.RequestBlock: c.SendReply(msgID, types.RejectBlock{Height: msg.Height}) case *types.NewPeak, *types.NewCompactVDF, *types.NewSignagePointOrEndOfSubSlot, *types.NewUnfinishedBlock, *types.RequestMempoolTransactions, *types.NewTransaction: // do nothing default: log.Printf("LISTEN: %s: unexpected message: %#v", shortID, msg) } }) hs, err := c.PerformHandshake() if err != nil { log.Printf("LISTEN: %s: handshake error: %s", shortID, err) c.Close() return } c.StartRoutines() id := c.PeerID() nodeType, _ := types.NodeTypeName(hs.NodeType) nodesChan <- &Node{ ID: id[:], Host: c.RemoteAddr().(*net.TCPAddr).IP.String(), Port: hs.ServerPort, ProtocolVersion: hs.ProtocolVersion, SoftwareVersion: hs.SoftwareVersion, NodeType: nodeType, } for i := 0; ; i++ { peers, err := c.RequestPeers() if err != nil { log.Printf("LISTEN: %s: peers error: %s", shortID, err) return } timer := time.NewTimer(30 * time.Second) select { case rawNodesChan <- peers.PeerList: atomic.AddInt64(&peersCount, int64(len(peers.PeerList))) timer.Stop() case <-timer.C: log.Printf("LISTEN: %s: timeout adding peers chunk", shortID) } if i%60 == 59 { ip, err := askIP() if err == nil { 
c.Send(types.RespondPeers{PeerList: []types.TimestampedPeerInfo{ {Host: ip, Port: network.SERVER_PORT, Timestamp: uint64(time.Now().Unix())}, }}) } else { log.Printf("LISTEN: %s: ask IP error: %s", shortID, err) } } logPrint.Trigger() timer = time.NewTimer(5 * time.Minute) select { case <-connItem.stop: timer.Stop() return case <-timer.C: // } } } err := network.ListenOn("0.0.0.0", sslDir+"/ca/chia_ca.crt", sslDir+"/ca/chia_ca.key", nil, connHandler) if err != nil { worker.AddError(err) } }() return worker } func startSeemsOffUpdater(db *pg.DB) utils.Worker { worker := utils.NewSimpleWorker(1) go func() { defer worker.Done() for { stt := time.Now().UnixNano() _, err := db.Exec(` UPDATE nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF:RAW: error: %s", err) } dur := time.Now().UnixNano() - stt stt = time.Now().UnixNano() _, err = db.Exec(` UPDATE raw_nodes SET seems_off = true WHERE NOT seems_off AND checked_at IS NOT NULL AND updated_at < NOW() - INTERVAL '7 days'`) if err != nil { log.Printf("SEEMS_OFF: raw error: %s", err) } durRaw := time.Now().UnixNano() - stt log.Printf("SEEMS_OFF: updated nodes in %d ms, raw in %d ms", dur/1000/1000, durRaw/1000/1000) time.Sleep(30 * time.Minute) } }() return worker } func CMDUpdateNodes() error { sslDir := flag.String("ssl-dir", utils.HomeDirOrEmpty("/.chia/mainnet/ssl"), "path to chia/mainnet/ssl directory") flag.Parse() db := utils.MakePGConnection() gdb, gdb6, err := utils.MakeGeoIPConnection() if err != nil { return merry.Wrap(err) } dbNodeAddrs := make(chan *NodeAddr, 32) rawNodeChunks := make(chan []types.TimestampedPeerInfo, 16) nodesNoLoc := make(chan *Node, 16) rawNodesNoLoc := make(chan *NodeAddr, 16) nodesOut := make(chan *Node, 32) rawNodesOut := make(chan *NodeAddr, 256) workers := []utils.Worker{ // input startOldNodesLoader(db, dbNodeAddrs, 512), startNodesChecker(db, *sslDir, dbNodeAddrs, nodesNoLoc, 
rawNodeChunks, 256), startNodesListener(*sslDir, nodesNoLoc, rawNodeChunks), // process startRawNodesFilter(db, rawNodeChunks, rawNodesNoLoc), startNodesLocationChecker(gdb, gdb6, nodesNoLoc, nodesOut, rawNodesNoLoc, rawNodesOut, 32), // save startNodesSaver(db, nodesOut, 32), startRawNodesSaver(db, rawNodesOut, 512), // misc startSeemsOffUpdater(db), } logPrint := utils.NewSyncInterval(10*time.Second, func() { log.Printf("UPDATE: chans: (%d) -> (%d, %d -> %d) -> (%d, %d)", len(dbNodeAddrs), len(nodesNoLoc), len(rawNodeChunks), len(rawNodesNoLoc), len(nodesOut), len(rawNodesOut)) time.Sleep(10 * time.Second) }) for { for _, worker := range workers { if err := worker.PopError(); err != nil { return err } } logPrint.Trigger() time.Sleep(time.Second) } }
askIP
identifier_name
cadastro_orcamento_emp.js
var globalIndex= 0; var isEdition= false; var flag; var isChange = false; var isAppend = false; var isFree = false; var vlrTotal= 0; $(document).ready(function() { loadPage = function(isEd){ var aux= 0; isEdition = isEd; } addRowServico = function() { $("#gridLines").remove(); var last = $("#dataBank").html(); var htm = getRowMonted(globalIndex); if (htm != "") { htm = last + htm; $("#dataBank").empty(); $("#dataBank").append(htm); $("#servicoRefIn").val(""); $("#especialidadeIn").val(""); $("#servicoIn").val(""); $("#qtdeIn").val("1"); $("#vlrUnitIn").val("0.00"); getValor(); $("#servicoRefIn").focus(); if (globalIndex == 0) { $('#tabelaIn').attr("disabled", "disabled"); } globalIndex++; } } removeRowServico= function() { var htm = ""; var indexNew = 0; $("#dataBank >*").each(function(ind, domEle) { if (document.getElementById("checkrowTabela" + ind).checked) { globalIndex--; indexNew++; } else { htm+= getRowForDel(ind - indexNew , ind); } }); $("#dataBank").empty(); $("#dataBank").append(htm); getValor(); if (globalIndex < 1)
$("#servicoRefIn").focus(); } getRowMonted = function (value) { var seletorTabela = document.getElementById("tabelaIn"); var seletorSetor = document.getElementById("setorIn"); if ((seletorTabela.value == "") || (seletorSetor.value == "") || ($("#servicoRefIn").val() == "") || ($("#servicoIn").val() == "") || ($("#qtdeIn").val() == "")) { showWarning({ width: 400, mensagem: "Para realizar esta operação é necessário prrencher todos os campos de procedimento.", title: "Acesso Negado" }); return ""; } var htm = "<tr class=\"gridRow\" id=\"line" + value + "\" name=\"line" + value + "\" ><td><label id=\"rowSetor" + value + "\" name=\"rowSetor" + value + "\" >"; switch (seletorSetor.value) { case 'o': htm += "Odontológica</label></td>"; break; case 'l': htm += "Laboratorial</label></td>"; break; case 'm': htm += "Médica</label></td>"; break; case 'h': htm += "Hospitalar</label></td>"; break; } htm += "<td><label id=\"rowEspecialidade" + value + "\" name=\"rowEspecialidade" + value + "\" >" + $("#especialidadeIn").val() + "</label></td>"; htm+= "<td><label id=\"rowCodigo" + value + "\" name=\"rowCodigo" + value + "\" >" + $("#servicoRefIn").val() + "</label></td>"; htm += "<td><label id=\"rowServico" + value + "\" name=\"rowServico" + value + "\" >" + $("#servicoIn").val() + "</label></td>"; htm += "<td><label id=\"rowValor" + value + "\" name=\"rowValor" + value + "\" >" + $("#vlrUnitIn").val() + "</label></td>"; htm += "<td><label id=\"rowQtde" + value + "\" name=\"rowQtde" + value + "\" >" + $("#qtdeIn").val() + "</label></td>"; htm += "<td><label id=\"rowTotal" + value + "\" name=\"rowTotal" + value + "\" >" + formatCurrency(parseFloat($("#qtdeIn").val()) * parseFloat($("#vlrUnitIn").val())) + "</label></td>"; htm += "<td style=\"width: 10px;\"><input id=\"checkrowTabela" + value + "\" name=\"checkrowTabela" + value + "\" type=\"checkbox\"/></td></tr>"; return htm; } getRowForDel = function (value, indexRow) { var htm = "<tr class=\"gridRow\" id=\"line" + value + "\" 
name=\"line" + value + "\" ><td><label id=\"rowSetor" + value + "\" name=\"rowSetor" + value + "\" >" + $("#rowSetor" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowEspecialidade" + value + "\" name=\"rowEspecialidade" + value + "\" >" + $("#rowEspecialidade" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowCodigo" + value + "\" name=\"rowCodigo" + value + "\" >" + $("#rowCodigo" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowServico" + value + "\" name=\"rowServico" + value + "\" >" + $("#rowServico" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowValor" + value + "\" name=\"rowValor" + value + "\" >" + $("#rowValor" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowQtde" + value + "\" name=\"rowQtde" + value + "\" >" + $("#rowQtde" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowTotal" + value + "\" name=\"rowTotal" + value + "\" >" + $("#rowTotal" + indexRow).text() + "</label></td>"; htm += "<td style=\"width: 10px;\"><input id=\"checkrowTabela" + value + "\" name=\"checkrowTabela" + value + "\" type=\"checkbox\"/></td></tr>"; return htm; } processarEmpresas = function () { if ($('#unidadeIdIn').val() != "" && $('#setorIn').val()) { $.get("../FuncionarioGet",{ unidadeId: getPart($('#unidadeIdIn').val(), 2), setor: $('#setorIn').val(), from: "16" }, function (response) { var empresas = "<option value=\"\">Selecione</option>"; var tabelaVigencia = empresas; var pipeTabela = unmountPipe(response); var pipe = unmountPipe(ptVirgulaToRealPipe(pipeTabela[0])); pipeTabela = unmountPipe(ptVirgulaToRealPipe(pipeTabela[1])); for(var i=0; i<pipe.length; i++) { empresas+= "<option value=\"" + getPipeByIndex(pipe[i], 0) + "@" + getPipeByIndex(pipe[i], 2) + "\">" + getPipeByIndex(pipe[i], 1) + "</option>"; } for(var i = 0; i < pipeTabela.length; i++) { tabelaVigencia+= "<option value=\"" + getPart(pipeTabela[i], 1) + "\">" + getPart(pipeTabela[i], 2) + "</option>"; } 
$('#empIdIn').empty(); $('#empIdIn').append(empresas); if (!isEdition) { $('#tabelaIn').empty(); $('#tabelaIn').append(tabelaVigencia); } document.getElementById("empIdIn").selectedIndex= 0; } ); } else { $('#empIdIn').empty(); $('#empIdIn').append("<option value=\"\">Selecione</option>"); } } $('#unidadeIdIn').change(function(){ $('#unidadeIn').val(getPart($('#unidadeIdIn').val(), 1)); processarEmpresas(); }); $('#setorIn').change(function(){ processarEmpresas(); }); $('#empIdIn').change(function() { if($('#empIdIn').val() != "") { index = document.getElementById("empIdIn").selectedIndex; $('#conselhoIn').val(getPart(document.getElementById("empIdIn").options[index].value, 2)); } else { $('#conselhoIn').val(""); } }); $('#empIdIn').blur(function() { if ($('#empIdIn').val() != "") { if ($('#ctr').val() != "") { $("#servicoRefIn").removeAttr("readOnly", "readOnly"); $('#qtdeIn').removeAttr("readOnly", "readOnly"); } } else { $('#servicoRefIn').attr({readOnly: "readOnly"}); $('#qtdeIn').attr({readOnly: "readOnly"}); } $("#ctr").focus(); }); $('#ctr').blur(function() { if($('#ctr').val() != "") { if ($('#empIdIn').val() != "") { $("#servicoRefIn").removeAttr("readOnly", "readOnly"); $('#qtdeIn').removeAttr("readOnly", "readOnly"); } $.get("../FuncionarioGet",{ ctr: $('#ctr').val(), unidade: $('#unidadeIdIn').val(), from: "12" }, function (response) { var pipe = unmountPipe(response); var aux = "<option value=\"\">Selecione</option>"; for(var i=0; i<pipe.length; i++) { if (i == 0) { $("#userIdIn").val(getPipeByIndex(pipe[i], 0)); $("#usuarioIn").val(getPipeByIndex(pipe[i], 1)); if (getPipeByIndex(pipe[i], 2) == 2) { $("#tabelaIn").val('particular'); } } else { aux+= "<option value=\"" + getPart(pipe[i], 1) + "\">" + getPart(pipe[i], 2) + "</option>"; } } $('#dependenteIdIn').empty(); $('#dependenteIdIn').append(aux); document.getElementById("dependenteIdIn").selectedIndex= 0; } ); } else { $('#servicoRefIn').attr({readOnly: "readOnly"}); $('#qtdeIn').attr({readOnly: 
"readOnly"}); $("#usuarioIn").val(""); } $('#dependenteIdIn').focus(); }); $('#servicoRefIn').blur(function() { var qtde = 0; $("#dataBank >*").each(function (index, domEle) { if ($("#rowCodigo" + index).text() == $("#servicoRefIn").val()) { qtde++; } }); var seletorSetor = document.getElementById("setorIn"); var seletorTabela = document.getElementById("tabelaIn"); $.get("../FuncionarioGet",{ referencia: $("#servicoRefIn").val(), unidade: getPart($("#unidadeIdIn").val(), 2), setor: seletorSetor.value, tabela: seletorTabela.value, usuarioId : $("#userIdIn").val(), dependente: $("#dependenteIdIn").val(), qtde: qtde, from: "13"}, function (response) { $("#servicoIdIn").val(getPipeByIndex(response, 0)); $("#servicoIn").val(getPipeByIndex(response, 1)); $("#vlrUnitIn").val(getPipeByIndex(response, 2)); $("#especialidadeIn").val(getPipeByIndex(response, 3)); $('#qtdeIn').focus(); } ); }); subtractValor= function() { aux= 0; var teste= document.getElementById("checkrowTabela" + aux); while(document.getElementById("checkrowTabela" + aux) != undefined) { if (document.getElementById("checkrowTabela" + aux).checked) { vlrTotal-= parseFloat($('#noneD' + aux).val()); } aux++; } $('#total').empty(); $('#total').append(": " + formatDecimal(vlrTotal)); } getValor= function() { var total = 0; $("#dataBank > *").each(function (ind, domEle) { total += parseFloat($("#rowTotal" + ind).text()); }); $("#total").text(formatCurrency(total)); } /** * erroSalve * @param {type} */ erroSalve = function () { showErrorMessage ({ width: 400, mensagem: "Não é permitida a edição de orçamentos.", title: "Erro" }); } $("input").keypress(function (e) { if (e.which == 13) { addRowServico(); } }); $("#tabelaIn").change(function() { isChange = true; }); mountTabelaPost = function () { var htm = ""; $("#dataBank > *").each(function (ind, domEle) { htm += "<input type=\"hidden\" id=\"rowCodigo" + ind + "\" name=\"rowCodigo" + ind + "\" value=\"" + $("#rowCodigo" + ind).text() + "\">"; htm += "<input 
type=\"hidden\" id=\"rowQtde" + ind + "\" name=\"rowQtde" + ind + "\" value=\"" + $("#rowQtde" + ind).text() + "\">"; }); $("#dataBank").remove(); $("#localTabela").append(htm); } /** * enviarOrcamento * @param {type} */ enviarOrcamento = function() { if (flag && isChange) { if (($("#tabelaIn").val() != "") && ($("#haveParcela").val() == "n")) { showOption({ mensagem: "Atenção! A tabela de referencia foi alterada. Deseja manter a tabela selecionada?", title: "Salvar", width: 350, buttons: { "Não": function () { location.href = "orcamento_empresa.jsp?id=" + $("#userIdIn").val(); }, "Sim": function() { $("#tabelaIn").removeAttr("disabled", "disabled"); mountTabelaPost(); document.forms["formPost"].submit(); } } }); } else if($("#haveParcela").val() == "s") { showErrorMessage ({ width: 400, mensagem: "Não é possível mudar a tabela de um orçamento já parcelado.", title: "Erro" }); } else { showErrorMessage ({ width: 400, mensagem: "Escolha uma tabela de referencia.", title: "Erro" }); } } else if(flag) { location.href = "orcamento_empresa.jsp?id=" + $("#userIdIn").val(); } else { $("#tabelaIn").removeAttr("disabled", "disabled"); mountTabelaPost(); document.forms["formPost"].submit(); } } noAccess = function() { showErrorMessage ({ width: 400, mensagem: "Não é permitido ao administrador cadastrar orçamentos!", title: "Acesso Negado" }); } /** * printOrcamento * @param {type} */ printOrcamento = function() { var top = (screen.height - 600)/2; var left= (screen.width - 800)/2; window.close(); window.open("../GeradorRelatorio?rel=169&parametros=228@" + $("#orcIdIn").val(), 'nova', 'width= 800, height= 600, top= ' + top + ', left= ' + left); } $(this).ajaxStart(function(){ //showLoader('Carregando...', 'body', false); }); $(this).ajaxStop(function(){ //hideLoader(); }); });
{ $('#tabelaIn').removeAttr("disabled", "disabled"); }
conditional_block
cadastro_orcamento_emp.js
var globalIndex= 0; var isEdition= false; var flag; var isChange = false; var isAppend = false; var isFree = false; var vlrTotal= 0; $(document).ready(function() { loadPage = function(isEd){ var aux= 0; isEdition = isEd; } addRowServico = function() { $("#gridLines").remove(); var last = $("#dataBank").html(); var htm = getRowMonted(globalIndex); if (htm != "") { htm = last + htm; $("#dataBank").empty(); $("#dataBank").append(htm); $("#servicoRefIn").val(""); $("#especialidadeIn").val(""); $("#servicoIn").val(""); $("#qtdeIn").val("1"); $("#vlrUnitIn").val("0.00"); getValor(); $("#servicoRefIn").focus(); if (globalIndex == 0) { $('#tabelaIn').attr("disabled", "disabled"); } globalIndex++; } } removeRowServico= function() { var htm = ""; var indexNew = 0; $("#dataBank >*").each(function(ind, domEle) { if (document.getElementById("checkrowTabela" + ind).checked) { globalIndex--; indexNew++; } else { htm+= getRowForDel(ind - indexNew , ind); } }); $("#dataBank").empty(); $("#dataBank").append(htm); getValor(); if (globalIndex < 1) { $('#tabelaIn').removeAttr("disabled", "disabled"); } $("#servicoRefIn").focus(); } getRowMonted = function (value) { var seletorTabela = document.getElementById("tabelaIn"); var seletorSetor = document.getElementById("setorIn"); if ((seletorTabela.value == "") || (seletorSetor.value == "") || ($("#servicoRefIn").val() == "") || ($("#servicoIn").val() == "") || ($("#qtdeIn").val() == "")) { showWarning({ width: 400, mensagem: "Para realizar esta operação é necessário prrencher todos os campos de procedimento.", title: "Acesso Negado" }); return ""; } var htm = "<tr class=\"gridRow\" id=\"line" + value + "\" name=\"line" + value + "\" ><td><label id=\"rowSetor" + value + "\" name=\"rowSetor" + value + "\" >"; switch (seletorSetor.value) { case 'o': htm += "Odontológica</label></td>"; break; case 'l': htm += "Laboratorial</label></td>"; break; case 'm': htm += "Médica</label></td>"; break; case 'h': htm += "Hospitalar</label></td>"; break; } 
htm += "<td><label id=\"rowEspecialidade" + value + "\" name=\"rowEspecialidade" + value + "\" >" + $("#especialidadeIn").val() + "</label></td>"; htm+= "<td><label id=\"rowCodigo" + value + "\" name=\"rowCodigo" + value + "\" >" + $("#servicoRefIn").val() + "</label></td>"; htm += "<td><label id=\"rowServico" + value + "\" name=\"rowServico" + value + "\" >" + $("#servicoIn").val() + "</label></td>"; htm += "<td><label id=\"rowValor" + value + "\" name=\"rowValor" + value + "\" >" + $("#vlrUnitIn").val() + "</label></td>"; htm += "<td><label id=\"rowQtde" + value + "\" name=\"rowQtde" + value + "\" >" + $("#qtdeIn").val() + "</label></td>"; htm += "<td><label id=\"rowTotal" + value + "\" name=\"rowTotal" + value + "\" >" + formatCurrency(parseFloat($("#qtdeIn").val()) * parseFloat($("#vlrUnitIn").val())) + "</label></td>";
return htm; } getRowForDel = function (value, indexRow) { var htm = "<tr class=\"gridRow\" id=\"line" + value + "\" name=\"line" + value + "\" ><td><label id=\"rowSetor" + value + "\" name=\"rowSetor" + value + "\" >" + $("#rowSetor" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowEspecialidade" + value + "\" name=\"rowEspecialidade" + value + "\" >" + $("#rowEspecialidade" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowCodigo" + value + "\" name=\"rowCodigo" + value + "\" >" + $("#rowCodigo" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowServico" + value + "\" name=\"rowServico" + value + "\" >" + $("#rowServico" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowValor" + value + "\" name=\"rowValor" + value + "\" >" + $("#rowValor" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowQtde" + value + "\" name=\"rowQtde" + value + "\" >" + $("#rowQtde" + indexRow).text() + "</label></td>"; htm += "<td><label id=\"rowTotal" + value + "\" name=\"rowTotal" + value + "\" >" + $("#rowTotal" + indexRow).text() + "</label></td>"; htm += "<td style=\"width: 10px;\"><input id=\"checkrowTabela" + value + "\" name=\"checkrowTabela" + value + "\" type=\"checkbox\"/></td></tr>"; return htm; } processarEmpresas = function () { if ($('#unidadeIdIn').val() != "" && $('#setorIn').val()) { $.get("../FuncionarioGet",{ unidadeId: getPart($('#unidadeIdIn').val(), 2), setor: $('#setorIn').val(), from: "16" }, function (response) { var empresas = "<option value=\"\">Selecione</option>"; var tabelaVigencia = empresas; var pipeTabela = unmountPipe(response); var pipe = unmountPipe(ptVirgulaToRealPipe(pipeTabela[0])); pipeTabela = unmountPipe(ptVirgulaToRealPipe(pipeTabela[1])); for(var i=0; i<pipe.length; i++) { empresas+= "<option value=\"" + getPipeByIndex(pipe[i], 0) + "@" + getPipeByIndex(pipe[i], 2) + "\">" + getPipeByIndex(pipe[i], 1) + "</option>"; } for(var i = 0; i < pipeTabela.length; i++) { 
tabelaVigencia+= "<option value=\"" + getPart(pipeTabela[i], 1) + "\">" + getPart(pipeTabela[i], 2) + "</option>"; } $('#empIdIn').empty(); $('#empIdIn').append(empresas); if (!isEdition) { $('#tabelaIn').empty(); $('#tabelaIn').append(tabelaVigencia); } document.getElementById("empIdIn").selectedIndex= 0; } ); } else { $('#empIdIn').empty(); $('#empIdIn').append("<option value=\"\">Selecione</option>"); } } $('#unidadeIdIn').change(function(){ $('#unidadeIn').val(getPart($('#unidadeIdIn').val(), 1)); processarEmpresas(); }); $('#setorIn').change(function(){ processarEmpresas(); }); $('#empIdIn').change(function() { if($('#empIdIn').val() != "") { index = document.getElementById("empIdIn").selectedIndex; $('#conselhoIn').val(getPart(document.getElementById("empIdIn").options[index].value, 2)); } else { $('#conselhoIn').val(""); } }); $('#empIdIn').blur(function() { if ($('#empIdIn').val() != "") { if ($('#ctr').val() != "") { $("#servicoRefIn").removeAttr("readOnly", "readOnly"); $('#qtdeIn').removeAttr("readOnly", "readOnly"); } } else { $('#servicoRefIn').attr({readOnly: "readOnly"}); $('#qtdeIn').attr({readOnly: "readOnly"}); } $("#ctr").focus(); }); $('#ctr').blur(function() { if($('#ctr').val() != "") { if ($('#empIdIn').val() != "") { $("#servicoRefIn").removeAttr("readOnly", "readOnly"); $('#qtdeIn').removeAttr("readOnly", "readOnly"); } $.get("../FuncionarioGet",{ ctr: $('#ctr').val(), unidade: $('#unidadeIdIn').val(), from: "12" }, function (response) { var pipe = unmountPipe(response); var aux = "<option value=\"\">Selecione</option>"; for(var i=0; i<pipe.length; i++) { if (i == 0) { $("#userIdIn").val(getPipeByIndex(pipe[i], 0)); $("#usuarioIn").val(getPipeByIndex(pipe[i], 1)); if (getPipeByIndex(pipe[i], 2) == 2) { $("#tabelaIn").val('particular'); } } else { aux+= "<option value=\"" + getPart(pipe[i], 1) + "\">" + getPart(pipe[i], 2) + "</option>"; } } $('#dependenteIdIn').empty(); $('#dependenteIdIn').append(aux); 
document.getElementById("dependenteIdIn").selectedIndex= 0; } ); } else { $('#servicoRefIn').attr({readOnly: "readOnly"}); $('#qtdeIn').attr({readOnly: "readOnly"}); $("#usuarioIn").val(""); } $('#dependenteIdIn').focus(); }); $('#servicoRefIn').blur(function() { var qtde = 0; $("#dataBank >*").each(function (index, domEle) { if ($("#rowCodigo" + index).text() == $("#servicoRefIn").val()) { qtde++; } }); var seletorSetor = document.getElementById("setorIn"); var seletorTabela = document.getElementById("tabelaIn"); $.get("../FuncionarioGet",{ referencia: $("#servicoRefIn").val(), unidade: getPart($("#unidadeIdIn").val(), 2), setor: seletorSetor.value, tabela: seletorTabela.value, usuarioId : $("#userIdIn").val(), dependente: $("#dependenteIdIn").val(), qtde: qtde, from: "13"}, function (response) { $("#servicoIdIn").val(getPipeByIndex(response, 0)); $("#servicoIn").val(getPipeByIndex(response, 1)); $("#vlrUnitIn").val(getPipeByIndex(response, 2)); $("#especialidadeIn").val(getPipeByIndex(response, 3)); $('#qtdeIn').focus(); } ); }); subtractValor= function() { aux= 0; var teste= document.getElementById("checkrowTabela" + aux); while(document.getElementById("checkrowTabela" + aux) != undefined) { if (document.getElementById("checkrowTabela" + aux).checked) { vlrTotal-= parseFloat($('#noneD' + aux).val()); } aux++; } $('#total').empty(); $('#total').append(": " + formatDecimal(vlrTotal)); } getValor= function() { var total = 0; $("#dataBank > *").each(function (ind, domEle) { total += parseFloat($("#rowTotal" + ind).text()); }); $("#total").text(formatCurrency(total)); } /** * erroSalve * @param {type} */ erroSalve = function () { showErrorMessage ({ width: 400, mensagem: "Não é permitida a edição de orçamentos.", title: "Erro" }); } $("input").keypress(function (e) { if (e.which == 13) { addRowServico(); } }); $("#tabelaIn").change(function() { isChange = true; }); mountTabelaPost = function () { var htm = ""; $("#dataBank > *").each(function (ind, domEle) { htm += 
"<input type=\"hidden\" id=\"rowCodigo" + ind + "\" name=\"rowCodigo" + ind + "\" value=\"" + $("#rowCodigo" + ind).text() + "\">"; htm += "<input type=\"hidden\" id=\"rowQtde" + ind + "\" name=\"rowQtde" + ind + "\" value=\"" + $("#rowQtde" + ind).text() + "\">"; }); $("#dataBank").remove(); $("#localTabela").append(htm); } /** * enviarOrcamento * @param {type} */ enviarOrcamento = function() { if (flag && isChange) { if (($("#tabelaIn").val() != "") && ($("#haveParcela").val() == "n")) { showOption({ mensagem: "Atenção! A tabela de referencia foi alterada. Deseja manter a tabela selecionada?", title: "Salvar", width: 350, buttons: { "Não": function () { location.href = "orcamento_empresa.jsp?id=" + $("#userIdIn").val(); }, "Sim": function() { $("#tabelaIn").removeAttr("disabled", "disabled"); mountTabelaPost(); document.forms["formPost"].submit(); } } }); } else if($("#haveParcela").val() == "s") { showErrorMessage ({ width: 400, mensagem: "Não é possível mudar a tabela de um orçamento já parcelado.", title: "Erro" }); } else { showErrorMessage ({ width: 400, mensagem: "Escolha uma tabela de referencia.", title: "Erro" }); } } else if(flag) { location.href = "orcamento_empresa.jsp?id=" + $("#userIdIn").val(); } else { $("#tabelaIn").removeAttr("disabled", "disabled"); mountTabelaPost(); document.forms["formPost"].submit(); } } noAccess = function() { showErrorMessage ({ width: 400, mensagem: "Não é permitido ao administrador cadastrar orçamentos!", title: "Acesso Negado" }); } /** * printOrcamento * @param {type} */ printOrcamento = function() { var top = (screen.height - 600)/2; var left= (screen.width - 800)/2; window.close(); window.open("../GeradorRelatorio?rel=169&parametros=228@" + $("#orcIdIn").val(), 'nova', 'width= 800, height= 600, top= ' + top + ', left= ' + left); } $(this).ajaxStart(function(){ //showLoader('Carregando...', 'body', false); }); $(this).ajaxStop(function(){ //hideLoader(); }); });
htm += "<td style=\"width: 10px;\"><input id=\"checkrowTabela" + value + "\" name=\"checkrowTabela" + value + "\" type=\"checkbox\"/></td></tr>";
random_line_split
toolchain_tester.py
#!/usr/bin/env python # Copyright (c) 2011 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple test suite for toolchains espcially llvm arm toolchains. Sample invocations tools/toolchain_tester/toolchain_tester.py [options]+ test1.c test2.c ... where options are --config <config> --append <tag>=<value> --append_file=<filename> --verbose --show_console --exclude=<filename> --tmp=<path> --check_excludes --concurrency=<number> e.g. --append "CFLAGS:-lsupc++" will enable C++ eh support NOTE: the location of tmp files is intentionally hardcoded, so you can only run one instance of this at a time. """ from __future__ import print_function import getopt import glob import multiprocessing import os import shlex import subprocess import sys import time import toolchain_config # ====================================================================== # Options # ====================================================================== # list of streams being logged to (both normal and verbose output) REPORT_STREAMS = [sys.stdout] # max time (secs) to wait for command any command to complete TIMEOUT = 120 # enable verbose output, e.g. commands being executed VERBOSE = 0 # prefix for temporary files TMP_PREFIX = '/tmp/tc_test_' # show command output (stdout/stderr) SHOW_CONSOLE = 1 # append these settings to config APPEND = [] # append these settings to config, for a given test (specified by APPEND_FILES) APPEND_PER_TEST = {} # Files listing the APPEND_PER_TEST entries. APPEND_FILES = [] # exclude these tests EXCLUDE = {} # check whether excludes are still necessary CHECK_EXCLUDES = 0 # Files listing excluded tests EXCLUDE_FILES = [] # module with settings for compiler, etc. 
CFG = None # Number of simultaneous test processes CONCURRENCY = 1 # Child processes push failing test results onto this queue ERRORS = multiprocessing.Queue() # ====================================================================== # Hook print to we can print to both stdout and a file def Print(message): for s in REPORT_STREAMS: print(message, file=s) # ====================================================================== def Banner(message): Print('=' * 70) Print(message) Print('=' * 70) # ====================================================================== def RunCommand(cmd, always_dump_stdout_stderr): """Run a shell command given as an argv style vector.""" if VERBOSE: Print(str(cmd)) Print(" ".join(cmd)) start = time.time() p = subprocess.Popen(cmd, bufsize=1000*1000, stderr=subprocess.PIPE, stdout=subprocess.PIPE) while p.poll() is None: time.sleep(0.1) now = time.time() if now - start > TIMEOUT: Print('Error: timeout') Print('Killing pid %d' % p.pid) os.waitpid(-1, os.WNOHANG) return -1 stdout = p.stdout.read() stderr = p.stderr.read() retcode = p.wait() if retcode != 0: Print('Error: command failed %d %s' % (retcode, ' '.join(cmd))) always_dump_stdout_stderr = True if always_dump_stdout_stderr: Print(stderr) Print(stdout) return retcode def RemoveTempFiles(): global TMP_PREFIX for f in glob.glob(TMP_PREFIX + '*'): os.remove(f) def MakeExecutableCustom(config, test, extra): global TMP_PREFIX global SHOW_CONSOLE d = extra.copy() d['tmp'] = (TMP_PREFIX + '_' + os.path.basename(os.path.dirname(test)) + '_' + os.path.basename(test)) d['src'] = test for phase, command in config.GetCommands(d): command = shlex.split(command) try: retcode = RunCommand(command, SHOW_CONSOLE) except Exception as err: Print("cannot run phase %s: %s" % (phase, str(err))) return phase if retcode: return phase # success return '' def ParseExcludeFiles(config_attributes): ''' Parse the files containing tests to exclude (i.e. expected fails). 
Each line may contain a comma-separated list of attributes restricting the test configurations which are expected to fail. (e.g. architecture or optimization level). A test is only excluded if the configuration has all the attributes specified in the exclude line. Lines which have no attributes will match everything, and lines which specify only one attribute (e.g. architecture) will match all configurations with that attributed (e.g. both opt levels with that architecture) ''' for excludefile in EXCLUDE_FILES: f = open(excludefile) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split() if len(tokens) > 1: attributes = set(tokens[1].split(',')) if not attributes.issubset(config_attributes): continue test = tokens[0] else: test = line if test in EXCLUDE: Print('ERROR: duplicate exclude: [%s]' % line) EXCLUDE[test] = excludefile f.close() Print('Size of excludes now: %d' % len(EXCLUDE)) def ParseAppendFiles(): """Parse the file contain a list of test + CFLAGS to append for that test.""" for append_file in APPEND_FILES: f = open(append_file) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split(',') test = tokens[0] to_append = {} for t in tokens[1:]: tag, value = t.split(':') if tag in to_append: to_append[tag] = to_append[tag] + ' ' + value else: to_append[tag] = value if test in APPEND_PER_TEST: raise Exception('Duplicate append/flags for test %s (old %s, new %s)' % (test, APPEND_PER_TEST[test], to_append)) APPEND_PER_TEST[test] = to_append f.close() def ParseCommandLineArgs(argv): """Process command line options and return the unprocessed left overs.""" global VERBOSE, COMPILE_MODE, RUN_MODE, TMP_PREFIX global CFG, APPEND, SHOW_CONSOLE, CHECK_EXCLUDES, CONCURRENCY try: opts, args = getopt.getopt(argv[1:], '', ['verbose', 'show_console', 'append=', 'append_file=', 'config=', 'exclude=', 'check_excludes',
'concurrency=']) except getopt.GetoptError as err: Print(str(err)) # will print something like 'option -a not recognized' sys.exit(-1) for o, a in opts: # strip the leading '--' o = o[2:] if o == 'verbose': VERBOSE = 1 elif o == 'show_console': SHOW_CONSOLE = 1 elif o == 'check_excludes': CHECK_EXCLUDES = 1 elif o == 'tmp': TMP_PREFIX = a elif o == 'exclude': # Parsing of exclude files must happen after we know the current config EXCLUDE_FILES.append(a) elif o == 'append': tag, value = a.split(":", 1) APPEND.append((tag, value)) elif o == 'append_file': APPEND_FILES.append(a) elif o == 'config': CFG = a elif o == 'concurrency': CONCURRENCY = int(a) else: Print('ERROR: bad commandline arg: %s' % o) sys.exit(-1) # return the unprocessed options, i.e. the command return args def RunTest(args): num, total, config, test, extra_flags = args base_test_name = os.path.basename(test) extra_flags = extra_flags.copy() toolchain_config.AppendDictionary(extra_flags, APPEND_PER_TEST.get(base_test_name, {})) Print('Running %d/%d: %s' % (num + 1, total, base_test_name)) try: result = MakeExecutableCustom(config, test, extra_flags) except KeyboardInterrupt: # Swallow the keyboard interrupt in the child. Otherwise the parent # hangs trying to join it. 
pass if result and config.IsFlaky(): # TODO(dschuff): deflake qemu or switch to hardware # BUG=http://code.google.com/p/nativeclient/issues/detail?id=2197 # try it again, and only fail on consecutive failures Print('Retrying ' + base_test_name) result = MakeExecutableCustom(config, test, extra_flags) if result: Print('[ FAILED ] %s: %s' % (result, test)) ERRORS.put((result, test)) def RunSuite(config, files, extra_flags, errors): """Run a collection of benchmarks.""" global ERRORS, CONCURRENCY Banner('running %d tests' % (len(files))) pool = multiprocessing.Pool(processes=CONCURRENCY) # create a list of run arguments to map over argslist = [(num, len(files), config, test, extra_flags) for num, test in enumerate(files)] # let the process pool handle the test assignments, order doesn't matter pool.map(RunTest, argslist) while not ERRORS.empty(): phase, test = ERRORS.get() errors[phase].append(test) def FilterOutExcludedTests(files, exclude): return [f for f in files if not os.path.basename(f) in exclude] def main(argv): files = ParseCommandLineArgs(argv) if not CFG: print('ERROR: you must specify a toolchain-config using --config=<config>') print('Available configs are: ') print('\n'.join(toolchain_config.TOOLCHAIN_CONFIGS.keys())) print() return -1 global TMP_PREFIX global APPEND TMP_PREFIX = TMP_PREFIX + CFG Banner('Config: %s' % CFG) config = toolchain_config.TOOLCHAIN_CONFIGS[CFG] ParseExcludeFiles(config.GetAttributes()) for tag, value in APPEND: config.Append(tag, value) ParseAppendFiles() config.SanityCheck() Print('TMP_PREFIX: %s' % TMP_PREFIX) # initialize error stats errors = {} for phase in config.GetPhases(): errors[phase] = [] Print('Tests before filtering %d' % len(files)) if not CHECK_EXCLUDES: files = FilterOutExcludedTests(files, EXCLUDE) Print('Tests after filtering %d' % len(files)) try: RunSuite(config, files, {}, errors) finally: RemoveTempFiles() # print error report USED_EXCLUDES = {} num_errors = 0 for k in errors: lst = errors[k] if not lst: 
continue Banner('%d failures in config %s phase %s' % (len(lst), CFG, k)) for e in lst: if os.path.basename(e) in EXCLUDE: USED_EXCLUDES[os.path.basename(e)] = None continue Print(e) num_errors += 1 if CHECK_EXCLUDES: Banner('Unnecessary excludes:') for e in EXCLUDE: if e not in USED_EXCLUDES: Print(e + ' (' + EXCLUDE[e] + ')') return num_errors > 0 if __name__ == '__main__': sys.exit(main(sys.argv))
'tmp=',
random_line_split
toolchain_tester.py
#!/usr/bin/env python # Copyright (c) 2011 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple test suite for toolchains espcially llvm arm toolchains. Sample invocations tools/toolchain_tester/toolchain_tester.py [options]+ test1.c test2.c ... where options are --config <config> --append <tag>=<value> --append_file=<filename> --verbose --show_console --exclude=<filename> --tmp=<path> --check_excludes --concurrency=<number> e.g. --append "CFLAGS:-lsupc++" will enable C++ eh support NOTE: the location of tmp files is intentionally hardcoded, so you can only run one instance of this at a time. """ from __future__ import print_function import getopt import glob import multiprocessing import os import shlex import subprocess import sys import time import toolchain_config # ====================================================================== # Options # ====================================================================== # list of streams being logged to (both normal and verbose output) REPORT_STREAMS = [sys.stdout] # max time (secs) to wait for command any command to complete TIMEOUT = 120 # enable verbose output, e.g. commands being executed VERBOSE = 0 # prefix for temporary files TMP_PREFIX = '/tmp/tc_test_' # show command output (stdout/stderr) SHOW_CONSOLE = 1 # append these settings to config APPEND = [] # append these settings to config, for a given test (specified by APPEND_FILES) APPEND_PER_TEST = {} # Files listing the APPEND_PER_TEST entries. APPEND_FILES = [] # exclude these tests EXCLUDE = {} # check whether excludes are still necessary CHECK_EXCLUDES = 0 # Files listing excluded tests EXCLUDE_FILES = [] # module with settings for compiler, etc. 
CFG = None # Number of simultaneous test processes CONCURRENCY = 1 # Child processes push failing test results onto this queue ERRORS = multiprocessing.Queue() # ====================================================================== # Hook print to we can print to both stdout and a file def Print(message):
# ====================================================================== def Banner(message): Print('=' * 70) Print(message) Print('=' * 70) # ====================================================================== def RunCommand(cmd, always_dump_stdout_stderr): """Run a shell command given as an argv style vector.""" if VERBOSE: Print(str(cmd)) Print(" ".join(cmd)) start = time.time() p = subprocess.Popen(cmd, bufsize=1000*1000, stderr=subprocess.PIPE, stdout=subprocess.PIPE) while p.poll() is None: time.sleep(0.1) now = time.time() if now - start > TIMEOUT: Print('Error: timeout') Print('Killing pid %d' % p.pid) os.waitpid(-1, os.WNOHANG) return -1 stdout = p.stdout.read() stderr = p.stderr.read() retcode = p.wait() if retcode != 0: Print('Error: command failed %d %s' % (retcode, ' '.join(cmd))) always_dump_stdout_stderr = True if always_dump_stdout_stderr: Print(stderr) Print(stdout) return retcode def RemoveTempFiles(): global TMP_PREFIX for f in glob.glob(TMP_PREFIX + '*'): os.remove(f) def MakeExecutableCustom(config, test, extra): global TMP_PREFIX global SHOW_CONSOLE d = extra.copy() d['tmp'] = (TMP_PREFIX + '_' + os.path.basename(os.path.dirname(test)) + '_' + os.path.basename(test)) d['src'] = test for phase, command in config.GetCommands(d): command = shlex.split(command) try: retcode = RunCommand(command, SHOW_CONSOLE) except Exception as err: Print("cannot run phase %s: %s" % (phase, str(err))) return phase if retcode: return phase # success return '' def ParseExcludeFiles(config_attributes): ''' Parse the files containing tests to exclude (i.e. expected fails). Each line may contain a comma-separated list of attributes restricting the test configurations which are expected to fail. (e.g. architecture or optimization level). A test is only excluded if the configuration has all the attributes specified in the exclude line. Lines which have no attributes will match everything, and lines which specify only one attribute (e.g. 
architecture) will match all configurations with that attributed (e.g. both opt levels with that architecture) ''' for excludefile in EXCLUDE_FILES: f = open(excludefile) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split() if len(tokens) > 1: attributes = set(tokens[1].split(',')) if not attributes.issubset(config_attributes): continue test = tokens[0] else: test = line if test in EXCLUDE: Print('ERROR: duplicate exclude: [%s]' % line) EXCLUDE[test] = excludefile f.close() Print('Size of excludes now: %d' % len(EXCLUDE)) def ParseAppendFiles(): """Parse the file contain a list of test + CFLAGS to append for that test.""" for append_file in APPEND_FILES: f = open(append_file) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split(',') test = tokens[0] to_append = {} for t in tokens[1:]: tag, value = t.split(':') if tag in to_append: to_append[tag] = to_append[tag] + ' ' + value else: to_append[tag] = value if test in APPEND_PER_TEST: raise Exception('Duplicate append/flags for test %s (old %s, new %s)' % (test, APPEND_PER_TEST[test], to_append)) APPEND_PER_TEST[test] = to_append f.close() def ParseCommandLineArgs(argv): """Process command line options and return the unprocessed left overs.""" global VERBOSE, COMPILE_MODE, RUN_MODE, TMP_PREFIX global CFG, APPEND, SHOW_CONSOLE, CHECK_EXCLUDES, CONCURRENCY try: opts, args = getopt.getopt(argv[1:], '', ['verbose', 'show_console', 'append=', 'append_file=', 'config=', 'exclude=', 'check_excludes', 'tmp=', 'concurrency=']) except getopt.GetoptError as err: Print(str(err)) # will print something like 'option -a not recognized' sys.exit(-1) for o, a in opts: # strip the leading '--' o = o[2:] if o == 'verbose': VERBOSE = 1 elif o == 'show_console': SHOW_CONSOLE = 1 elif o == 'check_excludes': CHECK_EXCLUDES = 1 elif o == 'tmp': TMP_PREFIX = a elif o == 'exclude': # Parsing of exclude files must happen after 
we know the current config EXCLUDE_FILES.append(a) elif o == 'append': tag, value = a.split(":", 1) APPEND.append((tag, value)) elif o == 'append_file': APPEND_FILES.append(a) elif o == 'config': CFG = a elif o == 'concurrency': CONCURRENCY = int(a) else: Print('ERROR: bad commandline arg: %s' % o) sys.exit(-1) # return the unprocessed options, i.e. the command return args def RunTest(args): num, total, config, test, extra_flags = args base_test_name = os.path.basename(test) extra_flags = extra_flags.copy() toolchain_config.AppendDictionary(extra_flags, APPEND_PER_TEST.get(base_test_name, {})) Print('Running %d/%d: %s' % (num + 1, total, base_test_name)) try: result = MakeExecutableCustom(config, test, extra_flags) except KeyboardInterrupt: # Swallow the keyboard interrupt in the child. Otherwise the parent # hangs trying to join it. pass if result and config.IsFlaky(): # TODO(dschuff): deflake qemu or switch to hardware # BUG=http://code.google.com/p/nativeclient/issues/detail?id=2197 # try it again, and only fail on consecutive failures Print('Retrying ' + base_test_name) result = MakeExecutableCustom(config, test, extra_flags) if result: Print('[ FAILED ] %s: %s' % (result, test)) ERRORS.put((result, test)) def RunSuite(config, files, extra_flags, errors): """Run a collection of benchmarks.""" global ERRORS, CONCURRENCY Banner('running %d tests' % (len(files))) pool = multiprocessing.Pool(processes=CONCURRENCY) # create a list of run arguments to map over argslist = [(num, len(files), config, test, extra_flags) for num, test in enumerate(files)] # let the process pool handle the test assignments, order doesn't matter pool.map(RunTest, argslist) while not ERRORS.empty(): phase, test = ERRORS.get() errors[phase].append(test) def FilterOutExcludedTests(files, exclude): return [f for f in files if not os.path.basename(f) in exclude] def main(argv): files = ParseCommandLineArgs(argv) if not CFG: print('ERROR: you must specify a toolchain-config using 
--config=<config>') print('Available configs are: ') print('\n'.join(toolchain_config.TOOLCHAIN_CONFIGS.keys())) print() return -1 global TMP_PREFIX global APPEND TMP_PREFIX = TMP_PREFIX + CFG Banner('Config: %s' % CFG) config = toolchain_config.TOOLCHAIN_CONFIGS[CFG] ParseExcludeFiles(config.GetAttributes()) for tag, value in APPEND: config.Append(tag, value) ParseAppendFiles() config.SanityCheck() Print('TMP_PREFIX: %s' % TMP_PREFIX) # initialize error stats errors = {} for phase in config.GetPhases(): errors[phase] = [] Print('Tests before filtering %d' % len(files)) if not CHECK_EXCLUDES: files = FilterOutExcludedTests(files, EXCLUDE) Print('Tests after filtering %d' % len(files)) try: RunSuite(config, files, {}, errors) finally: RemoveTempFiles() # print error report USED_EXCLUDES = {} num_errors = 0 for k in errors: lst = errors[k] if not lst: continue Banner('%d failures in config %s phase %s' % (len(lst), CFG, k)) for e in lst: if os.path.basename(e) in EXCLUDE: USED_EXCLUDES[os.path.basename(e)] = None continue Print(e) num_errors += 1 if CHECK_EXCLUDES: Banner('Unnecessary excludes:') for e in EXCLUDE: if e not in USED_EXCLUDES: Print(e + ' (' + EXCLUDE[e] + ')') return num_errors > 0 if __name__ == '__main__': sys.exit(main(sys.argv))
for s in REPORT_STREAMS: print(message, file=s)
identifier_body
toolchain_tester.py
#!/usr/bin/env python # Copyright (c) 2011 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple test suite for toolchains espcially llvm arm toolchains. Sample invocations tools/toolchain_tester/toolchain_tester.py [options]+ test1.c test2.c ... where options are --config <config> --append <tag>=<value> --append_file=<filename> --verbose --show_console --exclude=<filename> --tmp=<path> --check_excludes --concurrency=<number> e.g. --append "CFLAGS:-lsupc++" will enable C++ eh support NOTE: the location of tmp files is intentionally hardcoded, so you can only run one instance of this at a time. """ from __future__ import print_function import getopt import glob import multiprocessing import os import shlex import subprocess import sys import time import toolchain_config # ====================================================================== # Options # ====================================================================== # list of streams being logged to (both normal and verbose output) REPORT_STREAMS = [sys.stdout] # max time (secs) to wait for command any command to complete TIMEOUT = 120 # enable verbose output, e.g. commands being executed VERBOSE = 0 # prefix for temporary files TMP_PREFIX = '/tmp/tc_test_' # show command output (stdout/stderr) SHOW_CONSOLE = 1 # append these settings to config APPEND = [] # append these settings to config, for a given test (specified by APPEND_FILES) APPEND_PER_TEST = {} # Files listing the APPEND_PER_TEST entries. APPEND_FILES = [] # exclude these tests EXCLUDE = {} # check whether excludes are still necessary CHECK_EXCLUDES = 0 # Files listing excluded tests EXCLUDE_FILES = [] # module with settings for compiler, etc. 
CFG = None # Number of simultaneous test processes CONCURRENCY = 1 # Child processes push failing test results onto this queue ERRORS = multiprocessing.Queue() # ====================================================================== # Hook print to we can print to both stdout and a file def Print(message): for s in REPORT_STREAMS: print(message, file=s) # ====================================================================== def Banner(message): Print('=' * 70) Print(message) Print('=' * 70) # ====================================================================== def RunCommand(cmd, always_dump_stdout_stderr): """Run a shell command given as an argv style vector.""" if VERBOSE: Print(str(cmd)) Print(" ".join(cmd)) start = time.time() p = subprocess.Popen(cmd, bufsize=1000*1000, stderr=subprocess.PIPE, stdout=subprocess.PIPE) while p.poll() is None: time.sleep(0.1) now = time.time() if now - start > TIMEOUT: Print('Error: timeout') Print('Killing pid %d' % p.pid) os.waitpid(-1, os.WNOHANG) return -1 stdout = p.stdout.read() stderr = p.stderr.read() retcode = p.wait() if retcode != 0: Print('Error: command failed %d %s' % (retcode, ' '.join(cmd))) always_dump_stdout_stderr = True if always_dump_stdout_stderr: Print(stderr) Print(stdout) return retcode def RemoveTempFiles(): global TMP_PREFIX for f in glob.glob(TMP_PREFIX + '*'): os.remove(f) def MakeExecutableCustom(config, test, extra): global TMP_PREFIX global SHOW_CONSOLE d = extra.copy() d['tmp'] = (TMP_PREFIX + '_' + os.path.basename(os.path.dirname(test)) + '_' + os.path.basename(test)) d['src'] = test for phase, command in config.GetCommands(d): command = shlex.split(command) try: retcode = RunCommand(command, SHOW_CONSOLE) except Exception as err: Print("cannot run phase %s: %s" % (phase, str(err))) return phase if retcode: return phase # success return '' def ParseExcludeFiles(config_attributes): ''' Parse the files containing tests to exclude (i.e. expected fails). 
Each line may contain a comma-separated list of attributes restricting the test configurations which are expected to fail. (e.g. architecture or optimization level). A test is only excluded if the configuration has all the attributes specified in the exclude line. Lines which have no attributes will match everything, and lines which specify only one attribute (e.g. architecture) will match all configurations with that attributed (e.g. both opt levels with that architecture) ''' for excludefile in EXCLUDE_FILES: f = open(excludefile) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split() if len(tokens) > 1: attributes = set(tokens[1].split(',')) if not attributes.issubset(config_attributes): continue test = tokens[0] else: test = line if test in EXCLUDE: Print('ERROR: duplicate exclude: [%s]' % line) EXCLUDE[test] = excludefile f.close() Print('Size of excludes now: %d' % len(EXCLUDE)) def ParseAppendFiles(): """Parse the file contain a list of test + CFLAGS to append for that test.""" for append_file in APPEND_FILES: f = open(append_file) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split(',') test = tokens[0] to_append = {} for t in tokens[1:]: tag, value = t.split(':') if tag in to_append: to_append[tag] = to_append[tag] + ' ' + value else: to_append[tag] = value if test in APPEND_PER_TEST: raise Exception('Duplicate append/flags for test %s (old %s, new %s)' % (test, APPEND_PER_TEST[test], to_append)) APPEND_PER_TEST[test] = to_append f.close() def ParseCommandLineArgs(argv): """Process command line options and return the unprocessed left overs.""" global VERBOSE, COMPILE_MODE, RUN_MODE, TMP_PREFIX global CFG, APPEND, SHOW_CONSOLE, CHECK_EXCLUDES, CONCURRENCY try: opts, args = getopt.getopt(argv[1:], '', ['verbose', 'show_console', 'append=', 'append_file=', 'config=', 'exclude=', 'check_excludes', 'tmp=', 'concurrency=']) except 
getopt.GetoptError as err: Print(str(err)) # will print something like 'option -a not recognized' sys.exit(-1) for o, a in opts: # strip the leading '--' o = o[2:] if o == 'verbose': VERBOSE = 1 elif o == 'show_console': SHOW_CONSOLE = 1 elif o == 'check_excludes': CHECK_EXCLUDES = 1 elif o == 'tmp': TMP_PREFIX = a elif o == 'exclude': # Parsing of exclude files must happen after we know the current config EXCLUDE_FILES.append(a) elif o == 'append': tag, value = a.split(":", 1) APPEND.append((tag, value)) elif o == 'append_file': APPEND_FILES.append(a) elif o == 'config': CFG = a elif o == 'concurrency': CONCURRENCY = int(a) else: Print('ERROR: bad commandline arg: %s' % o) sys.exit(-1) # return the unprocessed options, i.e. the command return args def
(args): num, total, config, test, extra_flags = args base_test_name = os.path.basename(test) extra_flags = extra_flags.copy() toolchain_config.AppendDictionary(extra_flags, APPEND_PER_TEST.get(base_test_name, {})) Print('Running %d/%d: %s' % (num + 1, total, base_test_name)) try: result = MakeExecutableCustom(config, test, extra_flags) except KeyboardInterrupt: # Swallow the keyboard interrupt in the child. Otherwise the parent # hangs trying to join it. pass if result and config.IsFlaky(): # TODO(dschuff): deflake qemu or switch to hardware # BUG=http://code.google.com/p/nativeclient/issues/detail?id=2197 # try it again, and only fail on consecutive failures Print('Retrying ' + base_test_name) result = MakeExecutableCustom(config, test, extra_flags) if result: Print('[ FAILED ] %s: %s' % (result, test)) ERRORS.put((result, test)) def RunSuite(config, files, extra_flags, errors): """Run a collection of benchmarks.""" global ERRORS, CONCURRENCY Banner('running %d tests' % (len(files))) pool = multiprocessing.Pool(processes=CONCURRENCY) # create a list of run arguments to map over argslist = [(num, len(files), config, test, extra_flags) for num, test in enumerate(files)] # let the process pool handle the test assignments, order doesn't matter pool.map(RunTest, argslist) while not ERRORS.empty(): phase, test = ERRORS.get() errors[phase].append(test) def FilterOutExcludedTests(files, exclude): return [f for f in files if not os.path.basename(f) in exclude] def main(argv): files = ParseCommandLineArgs(argv) if not CFG: print('ERROR: you must specify a toolchain-config using --config=<config>') print('Available configs are: ') print('\n'.join(toolchain_config.TOOLCHAIN_CONFIGS.keys())) print() return -1 global TMP_PREFIX global APPEND TMP_PREFIX = TMP_PREFIX + CFG Banner('Config: %s' % CFG) config = toolchain_config.TOOLCHAIN_CONFIGS[CFG] ParseExcludeFiles(config.GetAttributes()) for tag, value in APPEND: config.Append(tag, value) ParseAppendFiles() config.SanityCheck() 
Print('TMP_PREFIX: %s' % TMP_PREFIX) # initialize error stats errors = {} for phase in config.GetPhases(): errors[phase] = [] Print('Tests before filtering %d' % len(files)) if not CHECK_EXCLUDES: files = FilterOutExcludedTests(files, EXCLUDE) Print('Tests after filtering %d' % len(files)) try: RunSuite(config, files, {}, errors) finally: RemoveTempFiles() # print error report USED_EXCLUDES = {} num_errors = 0 for k in errors: lst = errors[k] if not lst: continue Banner('%d failures in config %s phase %s' % (len(lst), CFG, k)) for e in lst: if os.path.basename(e) in EXCLUDE: USED_EXCLUDES[os.path.basename(e)] = None continue Print(e) num_errors += 1 if CHECK_EXCLUDES: Banner('Unnecessary excludes:') for e in EXCLUDE: if e not in USED_EXCLUDES: Print(e + ' (' + EXCLUDE[e] + ')') return num_errors > 0 if __name__ == '__main__': sys.exit(main(sys.argv))
RunTest
identifier_name
toolchain_tester.py
#!/usr/bin/env python # Copyright (c) 2011 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple test suite for toolchains espcially llvm arm toolchains. Sample invocations tools/toolchain_tester/toolchain_tester.py [options]+ test1.c test2.c ... where options are --config <config> --append <tag>=<value> --append_file=<filename> --verbose --show_console --exclude=<filename> --tmp=<path> --check_excludes --concurrency=<number> e.g. --append "CFLAGS:-lsupc++" will enable C++ eh support NOTE: the location of tmp files is intentionally hardcoded, so you can only run one instance of this at a time. """ from __future__ import print_function import getopt import glob import multiprocessing import os import shlex import subprocess import sys import time import toolchain_config # ====================================================================== # Options # ====================================================================== # list of streams being logged to (both normal and verbose output) REPORT_STREAMS = [sys.stdout] # max time (secs) to wait for command any command to complete TIMEOUT = 120 # enable verbose output, e.g. commands being executed VERBOSE = 0 # prefix for temporary files TMP_PREFIX = '/tmp/tc_test_' # show command output (stdout/stderr) SHOW_CONSOLE = 1 # append these settings to config APPEND = [] # append these settings to config, for a given test (specified by APPEND_FILES) APPEND_PER_TEST = {} # Files listing the APPEND_PER_TEST entries. APPEND_FILES = [] # exclude these tests EXCLUDE = {} # check whether excludes are still necessary CHECK_EXCLUDES = 0 # Files listing excluded tests EXCLUDE_FILES = [] # module with settings for compiler, etc. 
CFG = None # Number of simultaneous test processes CONCURRENCY = 1 # Child processes push failing test results onto this queue ERRORS = multiprocessing.Queue() # ====================================================================== # Hook print to we can print to both stdout and a file def Print(message): for s in REPORT_STREAMS: print(message, file=s) # ====================================================================== def Banner(message): Print('=' * 70) Print(message) Print('=' * 70) # ====================================================================== def RunCommand(cmd, always_dump_stdout_stderr): """Run a shell command given as an argv style vector.""" if VERBOSE: Print(str(cmd)) Print(" ".join(cmd)) start = time.time() p = subprocess.Popen(cmd, bufsize=1000*1000, stderr=subprocess.PIPE, stdout=subprocess.PIPE) while p.poll() is None: time.sleep(0.1) now = time.time() if now - start > TIMEOUT: Print('Error: timeout') Print('Killing pid %d' % p.pid) os.waitpid(-1, os.WNOHANG) return -1 stdout = p.stdout.read() stderr = p.stderr.read() retcode = p.wait() if retcode != 0: Print('Error: command failed %d %s' % (retcode, ' '.join(cmd))) always_dump_stdout_stderr = True if always_dump_stdout_stderr: Print(stderr) Print(stdout) return retcode def RemoveTempFiles(): global TMP_PREFIX for f in glob.glob(TMP_PREFIX + '*'): os.remove(f) def MakeExecutableCustom(config, test, extra): global TMP_PREFIX global SHOW_CONSOLE d = extra.copy() d['tmp'] = (TMP_PREFIX + '_' + os.path.basename(os.path.dirname(test)) + '_' + os.path.basename(test)) d['src'] = test for phase, command in config.GetCommands(d): command = shlex.split(command) try: retcode = RunCommand(command, SHOW_CONSOLE) except Exception as err: Print("cannot run phase %s: %s" % (phase, str(err))) return phase if retcode: return phase # success return '' def ParseExcludeFiles(config_attributes): ''' Parse the files containing tests to exclude (i.e. expected fails). 
Each line may contain a comma-separated list of attributes restricting the test configurations which are expected to fail. (e.g. architecture or optimization level). A test is only excluded if the configuration has all the attributes specified in the exclude line. Lines which have no attributes will match everything, and lines which specify only one attribute (e.g. architecture) will match all configurations with that attributed (e.g. both opt levels with that architecture) ''' for excludefile in EXCLUDE_FILES: f = open(excludefile) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split() if len(tokens) > 1:
else: test = line if test in EXCLUDE: Print('ERROR: duplicate exclude: [%s]' % line) EXCLUDE[test] = excludefile f.close() Print('Size of excludes now: %d' % len(EXCLUDE)) def ParseAppendFiles(): """Parse the file contain a list of test + CFLAGS to append for that test.""" for append_file in APPEND_FILES: f = open(append_file) for line in f: line = line.strip() if not line: continue if line.startswith('#'): continue tokens = line.split(',') test = tokens[0] to_append = {} for t in tokens[1:]: tag, value = t.split(':') if tag in to_append: to_append[tag] = to_append[tag] + ' ' + value else: to_append[tag] = value if test in APPEND_PER_TEST: raise Exception('Duplicate append/flags for test %s (old %s, new %s)' % (test, APPEND_PER_TEST[test], to_append)) APPEND_PER_TEST[test] = to_append f.close() def ParseCommandLineArgs(argv): """Process command line options and return the unprocessed left overs.""" global VERBOSE, COMPILE_MODE, RUN_MODE, TMP_PREFIX global CFG, APPEND, SHOW_CONSOLE, CHECK_EXCLUDES, CONCURRENCY try: opts, args = getopt.getopt(argv[1:], '', ['verbose', 'show_console', 'append=', 'append_file=', 'config=', 'exclude=', 'check_excludes', 'tmp=', 'concurrency=']) except getopt.GetoptError as err: Print(str(err)) # will print something like 'option -a not recognized' sys.exit(-1) for o, a in opts: # strip the leading '--' o = o[2:] if o == 'verbose': VERBOSE = 1 elif o == 'show_console': SHOW_CONSOLE = 1 elif o == 'check_excludes': CHECK_EXCLUDES = 1 elif o == 'tmp': TMP_PREFIX = a elif o == 'exclude': # Parsing of exclude files must happen after we know the current config EXCLUDE_FILES.append(a) elif o == 'append': tag, value = a.split(":", 1) APPEND.append((tag, value)) elif o == 'append_file': APPEND_FILES.append(a) elif o == 'config': CFG = a elif o == 'concurrency': CONCURRENCY = int(a) else: Print('ERROR: bad commandline arg: %s' % o) sys.exit(-1) # return the unprocessed options, i.e. 
the command return args def RunTest(args): num, total, config, test, extra_flags = args base_test_name = os.path.basename(test) extra_flags = extra_flags.copy() toolchain_config.AppendDictionary(extra_flags, APPEND_PER_TEST.get(base_test_name, {})) Print('Running %d/%d: %s' % (num + 1, total, base_test_name)) try: result = MakeExecutableCustom(config, test, extra_flags) except KeyboardInterrupt: # Swallow the keyboard interrupt in the child. Otherwise the parent # hangs trying to join it. pass if result and config.IsFlaky(): # TODO(dschuff): deflake qemu or switch to hardware # BUG=http://code.google.com/p/nativeclient/issues/detail?id=2197 # try it again, and only fail on consecutive failures Print('Retrying ' + base_test_name) result = MakeExecutableCustom(config, test, extra_flags) if result: Print('[ FAILED ] %s: %s' % (result, test)) ERRORS.put((result, test)) def RunSuite(config, files, extra_flags, errors): """Run a collection of benchmarks.""" global ERRORS, CONCURRENCY Banner('running %d tests' % (len(files))) pool = multiprocessing.Pool(processes=CONCURRENCY) # create a list of run arguments to map over argslist = [(num, len(files), config, test, extra_flags) for num, test in enumerate(files)] # let the process pool handle the test assignments, order doesn't matter pool.map(RunTest, argslist) while not ERRORS.empty(): phase, test = ERRORS.get() errors[phase].append(test) def FilterOutExcludedTests(files, exclude): return [f for f in files if not os.path.basename(f) in exclude] def main(argv): files = ParseCommandLineArgs(argv) if not CFG: print('ERROR: you must specify a toolchain-config using --config=<config>') print('Available configs are: ') print('\n'.join(toolchain_config.TOOLCHAIN_CONFIGS.keys())) print() return -1 global TMP_PREFIX global APPEND TMP_PREFIX = TMP_PREFIX + CFG Banner('Config: %s' % CFG) config = toolchain_config.TOOLCHAIN_CONFIGS[CFG] ParseExcludeFiles(config.GetAttributes()) for tag, value in APPEND: config.Append(tag, value) 
ParseAppendFiles() config.SanityCheck() Print('TMP_PREFIX: %s' % TMP_PREFIX) # initialize error stats errors = {} for phase in config.GetPhases(): errors[phase] = [] Print('Tests before filtering %d' % len(files)) if not CHECK_EXCLUDES: files = FilterOutExcludedTests(files, EXCLUDE) Print('Tests after filtering %d' % len(files)) try: RunSuite(config, files, {}, errors) finally: RemoveTempFiles() # print error report USED_EXCLUDES = {} num_errors = 0 for k in errors: lst = errors[k] if not lst: continue Banner('%d failures in config %s phase %s' % (len(lst), CFG, k)) for e in lst: if os.path.basename(e) in EXCLUDE: USED_EXCLUDES[os.path.basename(e)] = None continue Print(e) num_errors += 1 if CHECK_EXCLUDES: Banner('Unnecessary excludes:') for e in EXCLUDE: if e not in USED_EXCLUDES: Print(e + ' (' + EXCLUDE[e] + ')') return num_errors > 0 if __name__ == '__main__': sys.exit(main(sys.argv))
attributes = set(tokens[1].split(',')) if not attributes.issubset(config_attributes): continue test = tokens[0]
conditional_block
cipher_handout.py
import numpy as np HILL_KEY = [[21, 109, 119, 23, 88, 15, 116, 66], [22, 119, 70, 118, 111, 82, 121, 98], [79, 86, 2, 96, 90, 54, 95, 83], [22, 100, 113, 122, 92, 6, 52, 60], [1, 9, 9, 4, 112, 13, 26, 74], [3, 100, 92, 83, 51, 122, 102, 63], [71, 110, 92, 74, 26, 96, 92, 24], [30, 10, 85, 92, 47, 91, 114, 108]] HILL_KEY_REVERSE = [[138, 124, 28, 104, 136, 176, 193, 182], [65, 229, 101, 214, 103, 57, 4, 224], [140, 138, 214, 71, 46, 62, 148, 184], [77, 64, 202, 44, 119, 246, 60, 86], [69, 173, 41, 8, 106, 175, 255, 119], [105, 45, 131, 23, 116, 193, 29, 114], [190, 79, 82, 26, 81, 22, 187, 253], [70, 99, 51, 2, 221, 248, 152, 59]] DES_KEY = [65, 66, 67, 68, 69, 70, 71, 72] def get_content(): content = input("Enter the word to encrypt:") return content def string_to_ascii_list(content): out = [] for letter in content: out.append(ord(letter)) return out def ascii_list_to_bin_list(asciiList, binLen=8): out = [] for ascii in asciiList: itemBin = bin(ascii) for i in range(binLen + 2 - len(itemBin)): out.append(0) for b in itemBin[2:]: out.append(int(b)) return out def bin_to_string(binList, binFormatLen=8): out = "" for i in range(int(len(binList) / binFormatLen)): ascii = "" for j in range(binFormatLen): ascii += str(binList[i * binFormatLen + j]) out += chr(int(ascii, 2)) return out def ascii_list_to_string(list): str = "" for item in list: str += chr(item) return str def padding_content(content, blocksize=64): for i in range(int((len(content) - 1) / blocksize + 1) * blocksize - len(content)): content.append(0) return content def drop_padding(content): for i in range(len(content)): if content[i] == 0: return content[:i] return content def content_to_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 8): contentLine = [] for k in range(0, 8): contentLine.append(content[i * 8 * 8 + j * 8 + k]) contentBlock.append(contentLine) contentBlockArray.append(contentBlock) return contentBlockArray def 
content_to_des_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 64): contentBlock.append(content[i * 64 + j]) contentBlockArray.append(contentBlock) return contentBlockArray def block_array_to_content(contentBlockArray, block_height=8, block_length=8): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def des_block_array_to_content(contentBlockArray): content = [] for contentBlock in contentBlockArray:
content.append(contentLine) return content def block_to_content(contentBlock, block_height=8, block_length=8): content = [] for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def hill_encrypt_block_array(contentBlockArray, keyBlock, field): cipherBlockArray = [] keyBlockNum = 0 for contentBlock in contentBlockArray: outMetrix = hill_encrypt_block(contentBlock, keyBlock, field) cipherBlockArray.append(outMetrix) return cipherBlockArray def hill_decrypt_block_array(contentBlockArray, keyBlock, field): plainBlockArray = [] for contentBlock in contentBlockArray: outMetrix = hill_decrypt_block(contentBlock, keyBlock, field) plainBlockArray.append(outMetrix) return plainBlockArray def hill_encrypt_block(contentBlock, keyBlock, field): cipherBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) cipherBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return cipherBlock def hill_decrypt_block(contentBlock, keyBlock, field): plainBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) plainBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return plainBlock def des_string_proc(content): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list(content)))) def des_ascii_list_proc(content, formatBase=8): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(content, formatBase))) # def des_encypt_block_array(content,keyBlock): # cipherBlockArray = [] # contentBlockArray=des_content_proc(content) # keyBlockNum = 0 # for contentBlock in contentBlockArray: # outMetrix = des_encypt_block(contentBlock, keyBlock) # cipherBlockArray.append(outMetrix) # return cipherBlockArray def des_encypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n加密子密钥:\n") 
file.writelines(str(subKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def des_decrypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) subDecryptKeyArray = subKeyArray[::-1] file = open("debug.txt", "a") file.write("\n解密子密钥:\n") file.writelines(str(subDecryptKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subDecryptKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def list_xor(list1, list2): out = [] for i in range(len(list1)): out.append(list1[i] ^ list2[i]) return out # def des_key_proc(keyBlock): # return ascii_list_to_bin_list(keyBlock) def get_sub_key(keyBlock, keyBlockFormatBase=8): key = ascii_list_to_bin_list(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n密钥:\n") file.writelines(str(key)) file.close() key56 = des_key_do_pc_1(key) keyBlock = des_key_do_shift_pc_2(key56) return keyBlock def des_do_extend_permutation(content32List): '''扩展置换:将32位输入置换成48位输出。''' '''扩展置置换目标是IP置换后获得的右半部分R0,将32位输入扩展为48位(分为4位×8组)输出。''' E = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1] return [content32List[E[i] - 1] for i in range(48)] def des_key_do_pc_1(keyList): '''密钥置换:不考虑每个字节的第8位,DES的密钥由64位减至56位,每个字节的第8位作为奇偶校验位。''' PC = [ 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 ] return [keyList[PC[i] - 1] for i in range(56)] def des_key_do_shift_pc_2(keyList): '''在DES的每一轮中,从56位密钥产生出不同的48位子密钥''' '''该处输出为所有轮次的子密钥''' PC = [14, 17, 11, 
24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32] MOV = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1] result = [] key56=keyList for i in range(16): # 每28位为一部分,分别进行循环左移 key0 = des_do_shift(key56[:28], MOV[i]) key1 = des_do_shift(key56[28:], MOV[i]) key56 = key0 + key1 # 对56位密钥进行 PC-2 变换,将其压缩为48位 key48 = [key56[PC[j] - 1] for j in range(48)] result.append(key48) return result def des_do_shift(keyList, mov): return keyList[mov:] + keyList[:mov] def des_do_s_box(list48): '''S-盒置换:将48位输入均分成长度为6的8个小组,每个小组按顺序进入相应的S盒各得到4位输出,返回合并后的32位结果。''' # S 盒 S_BOX = [[ [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], ], [ [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], ], [ [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], ], [ [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], ], [ [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], ], [ [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [4, 3, 
2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], ], [ [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], ], [ [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], ]] result = [] for i in range(0, 8): temp = list48[i * 6:i * 6 + 6] row = int(str(temp[0]) + str(temp[-1]), 2) column = int(str(temp[1]) + str(temp[2]) + str(temp[3]) + str(temp[4]), 2) letter = S_BOX[i][row][column] result.append(letter) return ascii_list_to_bin_list(result, 4) def des_do_p_box(list32): P_BOX = [16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25] return [list32[P_BOX[i] - 1] for i in range(32)] def des_do_right32(left32, right32, subKey): right48 = des_do_extend_permutation(right32) right48tmp = list_xor(right48, subKey) right32tmp = des_do_s_box(right48tmp) right32tmp = des_do_p_box(right32tmp) right32 = list_xor(left32, right32tmp) return right32 def des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase=8): # step1 '''初始置换 IP''' text = des_do_ip(contentBlock) # step2 '''16轮迭代运算''' # subKeyArray=get_sub_key(keyBlock,keyBlockFormatBase) for i in range(16): l,r=text[:32], text[32:] lNext=r rNext=des_do_right32(l, r, subKeyArray[i]) text=lNext+rNext file = open("debug.txt", "a") file.write("\n第" + str(i + 1) + "轮输出:\n") file.writelines(str(text)) file.close() # print("第"+str(i+1)+"轮输出:") # print(round[i]) # step3 '''逆初始置换IP-1''' text = text[32:] + text[:32] out = des_do_ip_inverse(text) return out def des_do_ip(contentBlock): '''IP置换''' IP = [ 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 
33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 ] # content=block_to_content(contentBlock) return [contentBlock[IP[i] - 1] for i in range(64)] def des_do_ip_inverse(contentBlock): '''IP逆置换''' IP_INVERSE = [ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 ] # content=block_to_content(contentBlock) return [contentBlock[IP_INVERSE[i] - 1] for i in range(64)] def hill(): # text = content_to_block_array(padding_content(get_content())) message = "Typora will give you a seamless experience as both a reader and a writer. It removes the preview window, mode switcher, syntax symbols of markdown source code, and all other unnecessary distractions.." text = content_to_block_array(padding_content(string_to_ascii_list(message))) print("明文数组") print(text) # 希尔加密 cipher = hill_encrypt_block_array(text, HILL_KEY, 256) cipher = drop_padding(block_array_to_content(cipher)) print("HILL 密文:") # print(cipher) print(ascii_list_to_string(cipher)) # 希尔解解密 cipher = content_to_block_array(padding_content(cipher)) plain = hill_decrypt_block_array(cipher, HILL_KEY_REVERSE, 256) plain = drop_padding(block_array_to_content(plain)) print("HILL 解密文:") # print(plain) print(ascii_list_to_string(plain)) def des(): message = "aaaaaaaa" # message=[15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14] # key=[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]z message = des_string_proc(message) file = open("debug.txt", "a") file.write("DES 文本:\n") file.writelines(str(message)) file.write("DES 加密开始\n") file.close() # DES 加密 cipher = des_encypt_block_array(message, DES_KEY) cipher = des_block_array_to_content(cipher) # print("DES 密文数组:") # print(cipher) print("DES 密文 ASCII:") print(bin_to_string(cipher)) file = open("debug.txt", "a") file.write("\n\nDES 密文数组:\n") 
file.writelines(str(cipher)) file.write("\n\nDES 解密开始\n") file.close() # DES 解密 cipher = content_to_des_block_array(cipher) plain = des_decrypt_block_array(cipher, DES_KEY) palin = des_block_array_to_content(plain) # print("DES 明文数组:") # print(palin) print("DES 明文 ASCII:") print(bin_to_string(palin)) file = open("debug.txt", "a") file.write("\n\nDES 明文数组:\n") file.writelines(str(palin)) file.close() if __name__ == "__main__": hill() des()
for contentLine in contentBlock:
random_line_split
cipher_handout.py
import numpy as np HILL_KEY = [[21, 109, 119, 23, 88, 15, 116, 66], [22, 119, 70, 118, 111, 82, 121, 98], [79, 86, 2, 96, 90, 54, 95, 83], [22, 100, 113, 122, 92, 6, 52, 60], [1, 9, 9, 4, 112, 13, 26, 74], [3, 100, 92, 83, 51, 122, 102, 63], [71, 110, 92, 74, 26, 96, 92, 24], [30, 10, 85, 92, 47, 91, 114, 108]] HILL_KEY_REVERSE = [[138, 124, 28, 104, 136, 176, 193, 182], [65, 229, 101, 214, 103, 57, 4, 224], [140, 138, 214, 71, 46, 62, 148, 184], [77, 64, 202, 44, 119, 246, 60, 86], [69, 173, 41, 8, 106, 175, 255, 119], [105, 45, 131, 23, 116, 193, 29, 114], [190, 79, 82, 26, 81, 22, 187, 253], [70, 99, 51, 2, 221, 248, 152, 59]] DES_KEY = [65, 66, 67, 68, 69, 70, 71, 72] def get_content(): content = input("Enter the word to encrypt:") return content def string_to_ascii_list(content): out = [] for letter in content: out.append(ord(letter)) return out def ascii_list_to_bin_list(asciiList, binLen=8): out = [] for ascii in asciiList: itemBin = bin(ascii) for i in range(binLen + 2 - len(itemBin)): out.append(0) for b in itemBin[2:]: out.append(int(b)) return out def bin_to_string(binList, binFormatLen=8): out = "" for i in range(int(len(binList) / binFormatLen)): ascii = "" for j in range(binFormatLen): ascii += str(binList[i * binFormatLen + j]) out += chr(int(ascii, 2)) return out def ascii_list_to_string(list):
def padding_content(content, blocksize=64): for i in range(int((len(content) - 1) / blocksize + 1) * blocksize - len(content)): content.append(0) return content def drop_padding(content): for i in range(len(content)): if content[i] == 0: return content[:i] return content def content_to_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 8): contentLine = [] for k in range(0, 8): contentLine.append(content[i * 8 * 8 + j * 8 + k]) contentBlock.append(contentLine) contentBlockArray.append(contentBlock) return contentBlockArray def content_to_des_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 64): contentBlock.append(content[i * 64 + j]) contentBlockArray.append(contentBlock) return contentBlockArray def block_array_to_content(contentBlockArray, block_height=8, block_length=8): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def des_block_array_to_content(contentBlockArray): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: content.append(contentLine) return content def block_to_content(contentBlock, block_height=8, block_length=8): content = [] for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def hill_encrypt_block_array(contentBlockArray, keyBlock, field): cipherBlockArray = [] keyBlockNum = 0 for contentBlock in contentBlockArray: outMetrix = hill_encrypt_block(contentBlock, keyBlock, field) cipherBlockArray.append(outMetrix) return cipherBlockArray def hill_decrypt_block_array(contentBlockArray, keyBlock, field): plainBlockArray = [] for contentBlock in contentBlockArray: outMetrix = hill_decrypt_block(contentBlock, keyBlock, field) plainBlockArray.append(outMetrix) return plainBlockArray def 
hill_encrypt_block(contentBlock, keyBlock, field): cipherBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) cipherBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return cipherBlock def hill_decrypt_block(contentBlock, keyBlock, field): plainBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) plainBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return plainBlock def des_string_proc(content): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list(content)))) def des_ascii_list_proc(content, formatBase=8): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(content, formatBase))) # def des_encypt_block_array(content,keyBlock): # cipherBlockArray = [] # contentBlockArray=des_content_proc(content) # keyBlockNum = 0 # for contentBlock in contentBlockArray: # outMetrix = des_encypt_block(contentBlock, keyBlock) # cipherBlockArray.append(outMetrix) # return cipherBlockArray def des_encypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n加密子密钥:\n") file.writelines(str(subKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def des_decrypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) subDecryptKeyArray = subKeyArray[::-1] file = open("debug.txt", "a") file.write("\n解密子密钥:\n") file.writelines(str(subDecryptKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subDecryptKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def list_xor(list1, list2): out = [] for i in 
range(len(list1)): out.append(list1[i] ^ list2[i]) return out # def des_key_proc(keyBlock): # return ascii_list_to_bin_list(keyBlock) def get_sub_key(keyBlock, keyBlockFormatBase=8): key = ascii_list_to_bin_list(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n密钥:\n") file.writelines(str(key)) file.close() key56 = des_key_do_pc_1(key) keyBlock = des_key_do_shift_pc_2(key56) return keyBlock def des_do_extend_permutation(content32List): '''扩展置换:将32位输入置换成48位输出。''' '''扩展置置换目标是IP置换后获得的右半部分R0,将32位输入扩展为48位(分为4位×8组)输出。''' E = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1] return [content32List[E[i] - 1] for i in range(48)] def des_key_do_pc_1(keyList): '''密钥置换:不考虑每个字节的第8位,DES的密钥由64位减至56位,每个字节的第8位作为奇偶校验位。''' PC = [ 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 ] return [keyList[PC[i] - 1] for i in range(56)] def des_key_do_shift_pc_2(keyList): '''在DES的每一轮中,从56位密钥产生出不同的48位子密钥''' '''该处输出为所有轮次的子密钥''' PC = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32] MOV = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1] result = [] key56=keyList for i in range(16): # 每28位为一部分,分别进行循环左移 key0 = des_do_shift(key56[:28], MOV[i]) key1 = des_do_shift(key56[28:], MOV[i]) key56 = key0 + key1 # 对56位密钥进行 PC-2 变换,将其压缩为48位 key48 = [key56[PC[j] - 1] for j in range(48)] result.append(key48) return result def des_do_shift(keyList, mov): return keyList[mov:] + keyList[:mov] def des_do_s_box(list48): '''S-盒置换:将48位输入均分成长度为6的8个小组,每个小组按顺序进入相应的S盒各得到4位输出,返回合并后的32位结果。''' # S 盒 S_BOX = [[ [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [0, 15, 7, 4, 14, 
2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], ], [ [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], ], [ [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], ], [ [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], ], [ [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], ], [ [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], ], [ [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], ], [ [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], ]] result = [] for i in range(0, 8): temp = list48[i * 6:i * 6 + 6] row = int(str(temp[0]) + str(temp[-1]), 2) column = int(str(temp[1]) + str(temp[2]) + str(temp[3]) + str(temp[4]), 2) letter = S_BOX[i][row][column] result.append(letter) return 
ascii_list_to_bin_list(result, 4) def des_do_p_box(list32): P_BOX = [16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25] return [list32[P_BOX[i] - 1] for i in range(32)] def des_do_right32(left32, right32, subKey): right48 = des_do_extend_permutation(right32) right48tmp = list_xor(right48, subKey) right32tmp = des_do_s_box(right48tmp) right32tmp = des_do_p_box(right32tmp) right32 = list_xor(left32, right32tmp) return right32 def des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase=8): # step1 '''初始置换 IP''' text = des_do_ip(contentBlock) # step2 '''16轮迭代运算''' # subKeyArray=get_sub_key(keyBlock,keyBlockFormatBase) for i in range(16): l,r=text[:32], text[32:] lNext=r rNext=des_do_right32(l, r, subKeyArray[i]) text=lNext+rNext file = open("debug.txt", "a") file.write("\n第" + str(i + 1) + "轮输出:\n") file.writelines(str(text)) file.close() # print("第"+str(i+1)+"轮输出:") # print(round[i]) # step3 '''逆初始置换IP-1''' text = text[32:] + text[:32] out = des_do_ip_inverse(text) return out def des_do_ip(contentBlock): '''IP置换''' IP = [ 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 ] # content=block_to_content(contentBlock) return [contentBlock[IP[i] - 1] for i in range(64)] def des_do_ip_inverse(contentBlock): '''IP逆置换''' IP_INVERSE = [ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 ] # content=block_to_content(contentBlock) return [contentBlock[IP_INVERSE[i] - 1] for i in range(64)] def hill(): # text = content_to_block_array(padding_content(get_content())) message = "Typora will give you a seamless experience as 
both a reader and a writer. It removes the preview window, mode switcher, syntax symbols of markdown source code, and all other unnecessary distractions.." text = content_to_block_array(padding_content(string_to_ascii_list(message))) print("明文数组") print(text) # 希尔加密 cipher = hill_encrypt_block_array(text, HILL_KEY, 256) cipher = drop_padding(block_array_to_content(cipher)) print("HILL 密文:") # print(cipher) print(ascii_list_to_string(cipher)) # 希尔解解密 cipher = content_to_block_array(padding_content(cipher)) plain = hill_decrypt_block_array(cipher, HILL_KEY_REVERSE, 256) plain = drop_padding(block_array_to_content(plain)) print("HILL 解密文:") # print(plain) print(ascii_list_to_string(plain)) def des(): message = "aaaaaaaa" # message=[15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14] # key=[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]z message = des_string_proc(message) file = open("debug.txt", "a") file.write("DES 文本:\n") file.writelines(str(message)) file.write("DES 加密开始\n") file.close() # DES 加密 cipher = des_encypt_block_array(message, DES_KEY) cipher = des_block_array_to_content(cipher) # print("DES 密文数组:") # print(cipher) print("DES 密文 ASCII:") print(bin_to_string(cipher)) file = open("debug.txt", "a") file.write("\n\nDES 密文数组:\n") file.writelines(str(cipher)) file.write("\n\nDES 解密开始\n") file.close() # DES 解密 cipher = content_to_des_block_array(cipher) plain = des_decrypt_block_array(cipher, DES_KEY) palin = des_block_array_to_content(plain) # print("DES 明文数组:") # print(palin) print("DES 明文 ASCII:") print(bin_to_string(palin)) file = open("debug.txt", "a") file.write("\n\nDES 明文数组:\n") file.writelines(str(palin)) file.close() if __name__ == "__main__": hill() des()
str = "" for item in list: str += chr(item) return str
identifier_body
cipher_handout.py
import numpy as np HILL_KEY = [[21, 109, 119, 23, 88, 15, 116, 66], [22, 119, 70, 118, 111, 82, 121, 98], [79, 86, 2, 96, 90, 54, 95, 83], [22, 100, 113, 122, 92, 6, 52, 60], [1, 9, 9, 4, 112, 13, 26, 74], [3, 100, 92, 83, 51, 122, 102, 63], [71, 110, 92, 74, 26, 96, 92, 24], [30, 10, 85, 92, 47, 91, 114, 108]] HILL_KEY_REVERSE = [[138, 124, 28, 104, 136, 176, 193, 182], [65, 229, 101, 214, 103, 57, 4, 224], [140, 138, 214, 71, 46, 62, 148, 184], [77, 64, 202, 44, 119, 246, 60, 86], [69, 173, 41, 8, 106, 175, 255, 119], [105, 45, 131, 23, 116, 193, 29, 114], [190, 79, 82, 26, 81, 22, 187, 253], [70, 99, 51, 2, 221, 248, 152, 59]] DES_KEY = [65, 66, 67, 68, 69, 70, 71, 72] def get_content(): content = input("Enter the word to encrypt:") return content def string_to_ascii_list(content): out = [] for letter in content: out.append(ord(letter)) return out def ascii_list_to_bin_list(asciiList, binLen=8): out = [] for ascii in asciiList: itemBin = bin(ascii) for i in range(binLen + 2 - len(itemBin)): out.append(0) for b in itemBin[2:]: out.append(int(b)) return out def bin_to_string(binList, binFormatLen=8): out = "" for i in range(int(len(binList) / binFormatLen)): ascii = "" for j in range(binFormatLen): ascii += str(binList[i * binFormatLen + j]) out += chr(int(ascii, 2)) return out def ascii_list_to_string(list): str = "" for item in list: str += chr(item) return str def padding_content(content, blocksize=64): for i in range(int((len(content) - 1) / blocksize + 1) * blocksize - len(content)): content.append(0) return content def drop_padding(content): for i in range(len(content)): if content[i] == 0: return content[:i] return content def content_to_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 8): contentLine = [] for k in range(0, 8): contentLine.append(content[i * 8 * 8 + j * 8 + k]) contentBlock.append(contentLine) contentBlockArray.append(contentBlock) return contentBlockArray def 
content_to_des_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 64): contentBlock.append(content[i * 64 + j]) contentBlockArray.append(contentBlock) return contentBlockArray def block_array_to_content(contentBlockArray, block_height=8, block_length=8): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def des_block_array_to_content(contentBlockArray): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: content.append(contentLine) return content def block_to_content(contentBlock, block_height=8, block_length=8): content = [] for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def hill_encrypt_block_array(contentBlockArray, keyBlock, field): cipherBlockArray = [] keyBlockNum = 0 for contentBlock in contentBlockArray: outMetrix = hill_encrypt_block(contentBlock, keyBlock, field) cipherBlockArray.append(outMetrix) return cipherBlockArray def hill_decrypt_block_array(contentBlockArray, keyBlock, field): plainBlockArray = [] for contentBlock in contentBlockArray: outMetrix = hill_decrypt_block(contentBlock, keyBlock, field) plainBlockArray.append(outMetrix) return plainBlockArray def hill_encrypt_block(contentBlock, keyBlock, field): cipherBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) cipherBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return cipherBlock def hill_decrypt_block(contentBlock, keyBlock, field): plainBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) plainBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return plainBlock def des_string_proc(content): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list(content)))) def des_ascii_list_proc(content, 
formatBase=8): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(content, formatBase))) # def des_encypt_block_array(content,keyBlock): # cipherBlockArray = [] # contentBlockArray=des_content_proc(content) # keyBlockNum = 0 # for contentBlock in contentBlockArray: # outMetrix = des_encypt_block(contentBlock, keyBlock) # cipherBlockArray.append(outMetrix) # return cipherBlockArray def des_encypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n加密子密钥:\n") file.writelines(str(subKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def des_decrypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) subDecryptKeyArray = subKeyArray[::-1] file = open("debug.txt", "a") file.write("\n解密子密钥:\n") file.writelines(str(subDecryptKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subDecryptKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def list_xor(list1, list2): out = [] for i in range(len(list1)): out.append(list1[i] ^ list2[i]) return out # def des_key_proc(keyBlock): # return ascii_list_to_bin_list(keyBlock) def get_sub_key(keyBlock, keyBlockFormatBase=8): key = ascii_list_to_bin_list(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n密钥:\n") file.writelines(str(key)) file.close() key56 = des_key_do_pc_1(key) keyBlock = des_key_do_shift_pc_2(key56) return keyBlock def des_do_extend_permutation(content32List): '''扩展置换:将32位输入置换成48位输出。''' '''扩展置置换目标是IP置换后获得的右半部分R0,将32位输入扩展为48位(分为4位×8组)输出。''' E = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 
16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1] return [content32List[E[i] - 1] for i in range(48)] def des_key_do_pc_1(keyList): '''密钥置换:不考虑每个字节的第8位,DES的密钥由64位减至56位,每个字节的第8位作为奇偶校验位。''' PC = [ 57, 49, 41, 33, 25, 17, 9,
34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 ] return [keyList[PC[i] - 1] for i in range(56)] def des_key_do_shift_pc_2(keyList): '''在DES的每一轮中,从56位密钥产生出不同的48位子密钥''' '''该处输出为所有轮次的子密钥''' PC = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32] MOV = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1] result = [] key56=keyList for i in range(16): # 每28位为一部分,分别进行循环左移 key0 = des_do_shift(key56[:28], MOV[i]) key1 = des_do_shift(key56[28:], MOV[i]) key56 = key0 + key1 # 对56位密钥进行 PC-2 变换,将其压缩为48位 key48 = [key56[PC[j] - 1] for j in range(48)] result.append(key48) return result def des_do_shift(keyList, mov): return keyList[mov:] + keyList[:mov] def des_do_s_box(list48): '''S-盒置换:将48位输入均分成长度为6的8个小组,每个小组按顺序进入相应的S盒各得到4位输出,返回合并后的32位结果。''' # S 盒 S_BOX = [[ [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], ], [ [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], ], [ [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], ], [ [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], ], [ [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [14, 11, 2, 12, 
4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], ], [ [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], ], [ [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], ], [ [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], ]] result = [] for i in range(0, 8): temp = list48[i * 6:i * 6 + 6] row = int(str(temp[0]) + str(temp[-1]), 2) column = int(str(temp[1]) + str(temp[2]) + str(temp[3]) + str(temp[4]), 2) letter = S_BOX[i][row][column] result.append(letter) return ascii_list_to_bin_list(result, 4) def des_do_p_box(list32): P_BOX = [16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25] return [list32[P_BOX[i] - 1] for i in range(32)] def des_do_right32(left32, right32, subKey): right48 = des_do_extend_permutation(right32) right48tmp = list_xor(right48, subKey) right32tmp = des_do_s_box(right48tmp) right32tmp = des_do_p_box(right32tmp) right32 = list_xor(left32, right32tmp) return right32 def des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase=8): # step1 '''初始置换 IP''' text = des_do_ip(contentBlock) # step2 '''16轮迭代运算''' # subKeyArray=get_sub_key(keyBlock,keyBlockFormatBase) for i in range(16): l,r=text[:32], text[32:] lNext=r rNext=des_do_right32(l, r, subKeyArray[i]) text=lNext+rNext file = open("debug.txt", "a") file.write("\n第" + str(i + 1) + "轮输出:\n") file.writelines(str(text)) 
file.close() # print("第"+str(i+1)+"轮输出:") # print(round[i]) # step3 '''逆初始置换IP-1''' text = text[32:] + text[:32] out = des_do_ip_inverse(text) return out def des_do_ip(contentBlock): '''IP置换''' IP = [ 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 ] # content=block_to_content(contentBlock) return [contentBlock[IP[i] - 1] for i in range(64)] def des_do_ip_inverse(contentBlock): '''IP逆置换''' IP_INVERSE = [ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 ] # content=block_to_content(contentBlock) return [contentBlock[IP_INVERSE[i] - 1] for i in range(64)] def hill(): # text = content_to_block_array(padding_content(get_content())) message = "Typora will give you a seamless experience as both a reader and a writer. It removes the preview window, mode switcher, syntax symbols of markdown source code, and all other unnecessary distractions.." 
text = content_to_block_array(padding_content(string_to_ascii_list(message))) print("明文数组") print(text) # 希尔加密 cipher = hill_encrypt_block_array(text, HILL_KEY, 256) cipher = drop_padding(block_array_to_content(cipher)) print("HILL 密文:") # print(cipher) print(ascii_list_to_string(cipher)) # 希尔解解密 cipher = content_to_block_array(padding_content(cipher)) plain = hill_decrypt_block_array(cipher, HILL_KEY_REVERSE, 256) plain = drop_padding(block_array_to_content(plain)) print("HILL 解密文:") # print(plain) print(ascii_list_to_string(plain)) def des(): message = "aaaaaaaa" # message=[15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14] # key=[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]z message = des_string_proc(message) file = open("debug.txt", "a") file.write("DES 文本:\n") file.writelines(str(message)) file.write("DES 加密开始\n") file.close() # DES 加密 cipher = des_encypt_block_array(message, DES_KEY) cipher = des_block_array_to_content(cipher) # print("DES 密文数组:") # print(cipher) print("DES 密文 ASCII:") print(bin_to_string(cipher)) file = open("debug.txt", "a") file.write("\n\nDES 密文数组:\n") file.writelines(str(cipher)) file.write("\n\nDES 解密开始\n") file.close() # DES 解密 cipher = content_to_des_block_array(cipher) plain = des_decrypt_block_array(cipher, DES_KEY) palin = des_block_array_to_content(plain) # print("DES 明文数组:") # print(palin) print("DES 明文 ASCII:") print(bin_to_string(palin)) file = open("debug.txt", "a") file.write("\n\nDES 明文数组:\n") file.writelines(str(palin)) file.close() if __name__ == "__main__": hill() des()
1, 58, 50, 42,
identifier_name
cipher_handout.py
import numpy as np HILL_KEY = [[21, 109, 119, 23, 88, 15, 116, 66], [22, 119, 70, 118, 111, 82, 121, 98], [79, 86, 2, 96, 90, 54, 95, 83], [22, 100, 113, 122, 92, 6, 52, 60], [1, 9, 9, 4, 112, 13, 26, 74], [3, 100, 92, 83, 51, 122, 102, 63], [71, 110, 92, 74, 26, 96, 92, 24], [30, 10, 85, 92, 47, 91, 114, 108]] HILL_KEY_REVERSE = [[138, 124, 28, 104, 136, 176, 193, 182], [65, 229, 101, 214, 103, 57, 4, 224], [140, 138, 214, 71, 46, 62, 148, 184], [77, 64, 202, 44, 119, 246, 60, 86], [69, 173, 41, 8, 106, 175, 255, 119], [105, 45, 131, 23, 116, 193, 29, 114], [190, 79, 82, 26, 81, 22, 187, 253], [70, 99, 51, 2, 221, 248, 152, 59]] DES_KEY = [65, 66, 67, 68, 69, 70, 71, 72] def get_content(): content = input("Enter the word to encrypt:") return content def string_to_ascii_list(content): out = [] for letter in content: out.append(ord(letter)) return out def ascii_list_to_bin_list(asciiList, binLen=8): out = [] for ascii in asciiList: itemBin = bin(ascii) for i in range(binLen + 2 - len(itemBin)): out.append(0) for b in itemBin[2:]: out.append(int(b)) return out def bin_to_string(binList, binFormatLen=8): out = "" for i in range(int(len(binList) / binFormatLen)): ascii = "" for j in range(binFormatLen): ascii += str(binList[i * binFormatLen + j]) out += chr(int(ascii, 2)) return out def ascii_list_to_string(list): str = "" for item in list: str += chr(item) return str def padding_content(content, blocksize=64): for i in range(int((len(content) - 1) / blocksize + 1) * blocksize - len(content)): content.append(0) return content def drop_padding(content): for i in range(len(content)): if content[i] == 0: return content[:i] return content def content_to_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 8): contentLine = [] for k in range(0, 8): contentLine.append(content[i * 8 * 8 + j * 8 + k]) contentBlock.append(contentLine) contentBlockArray.append(contentBlock) return contentBlockArray def 
content_to_des_block_array(content): contentBlockArray = [] for i in range(0, int(len(content) / 64)): contentBlock = [] for j in range(0, 64): contentBlock.append(content[i * 64 + j]) contentBlockArray.append(contentBlock) return contentBlockArray def block_array_to_content(contentBlockArray, block_height=8, block_length=8): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: for contentItem in contentLine:
return content def des_block_array_to_content(contentBlockArray): content = [] for contentBlock in contentBlockArray: for contentLine in contentBlock: content.append(contentLine) return content def block_to_content(contentBlock, block_height=8, block_length=8): content = [] for contentLine in contentBlock: for contentItem in contentLine: content.append(contentItem) return content def hill_encrypt_block_array(contentBlockArray, keyBlock, field): cipherBlockArray = [] keyBlockNum = 0 for contentBlock in contentBlockArray: outMetrix = hill_encrypt_block(contentBlock, keyBlock, field) cipherBlockArray.append(outMetrix) return cipherBlockArray def hill_decrypt_block_array(contentBlockArray, keyBlock, field): plainBlockArray = [] for contentBlock in contentBlockArray: outMetrix = hill_decrypt_block(contentBlock, keyBlock, field) plainBlockArray.append(outMetrix) return plainBlockArray def hill_encrypt_block(contentBlock, keyBlock, field): cipherBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) cipherBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return cipherBlock def hill_decrypt_block(contentBlock, keyBlock, field): plainBlock = [] contentArray = np.array(contentBlock) keyArray = np.array(keyBlock) plainBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field) return plainBlock def des_string_proc(content): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list(content)))) def des_ascii_list_proc(content, formatBase=8): return content_to_des_block_array(padding_content(ascii_list_to_bin_list(content, formatBase))) # def des_encypt_block_array(content,keyBlock): # cipherBlockArray = [] # contentBlockArray=des_content_proc(content) # keyBlockNum = 0 # for contentBlock in contentBlockArray: # outMetrix = des_encypt_block(contentBlock, keyBlock) # cipherBlockArray.append(outMetrix) # return cipherBlockArray def des_encypt_block_array(contentBlockArray, keyBlock, 
keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n加密子密钥:\n") file.writelines(str(subKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def des_decrypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8): cipherBlockArray = [] subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase) subDecryptKeyArray = subKeyArray[::-1] file = open("debug.txt", "a") file.write("\n解密子密钥:\n") file.writelines(str(subDecryptKeyArray)) file.close() for contentBlock in contentBlockArray: outMetrix = des_encypt_block(contentBlock, subDecryptKeyArray, keyBlockFormatBase) cipherBlockArray.append(outMetrix) return cipherBlockArray def list_xor(list1, list2): out = [] for i in range(len(list1)): out.append(list1[i] ^ list2[i]) return out # def des_key_proc(keyBlock): # return ascii_list_to_bin_list(keyBlock) def get_sub_key(keyBlock, keyBlockFormatBase=8): key = ascii_list_to_bin_list(keyBlock, keyBlockFormatBase) file = open("debug.txt", "a") file.write("\n密钥:\n") file.writelines(str(key)) file.close() key56 = des_key_do_pc_1(key) keyBlock = des_key_do_shift_pc_2(key56) return keyBlock def des_do_extend_permutation(content32List): '''扩展置换:将32位输入置换成48位输出。''' '''扩展置置换目标是IP置换后获得的右半部分R0,将32位输入扩展为48位(分为4位×8组)输出。''' E = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1] return [content32List[E[i] - 1] for i in range(48)] def des_key_do_pc_1(keyList): '''密钥置换:不考虑每个字节的第8位,DES的密钥由64位减至56位,每个字节的第8位作为奇偶校验位。''' PC = [ 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 ] 
return [keyList[PC[i] - 1] for i in range(56)] def des_key_do_shift_pc_2(keyList): '''在DES的每一轮中,从56位密钥产生出不同的48位子密钥''' '''该处输出为所有轮次的子密钥''' PC = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32] MOV = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1] result = [] key56=keyList for i in range(16): # 每28位为一部分,分别进行循环左移 key0 = des_do_shift(key56[:28], MOV[i]) key1 = des_do_shift(key56[28:], MOV[i]) key56 = key0 + key1 # 对56位密钥进行 PC-2 变换,将其压缩为48位 key48 = [key56[PC[j] - 1] for j in range(48)] result.append(key48) return result def des_do_shift(keyList, mov): return keyList[mov:] + keyList[:mov] def des_do_s_box(list48): '''S-盒置换:将48位输入均分成长度为6的8个小组,每个小组按顺序进入相应的S盒各得到4位输出,返回合并后的32位结果。''' # S 盒 S_BOX = [[ [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], ], [ [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], ], [ [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], ], [ [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], ], [ [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], ], [ [12, 1, 10, 15, 9, 
2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], ], [ [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], ], [ [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], ]] result = [] for i in range(0, 8): temp = list48[i * 6:i * 6 + 6] row = int(str(temp[0]) + str(temp[-1]), 2) column = int(str(temp[1]) + str(temp[2]) + str(temp[3]) + str(temp[4]), 2) letter = S_BOX[i][row][column] result.append(letter) return ascii_list_to_bin_list(result, 4) def des_do_p_box(list32): P_BOX = [16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25] return [list32[P_BOX[i] - 1] for i in range(32)] def des_do_right32(left32, right32, subKey): right48 = des_do_extend_permutation(right32) right48tmp = list_xor(right48, subKey) right32tmp = des_do_s_box(right48tmp) right32tmp = des_do_p_box(right32tmp) right32 = list_xor(left32, right32tmp) return right32 def des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase=8): # step1 '''初始置换 IP''' text = des_do_ip(contentBlock) # step2 '''16轮迭代运算''' # subKeyArray=get_sub_key(keyBlock,keyBlockFormatBase) for i in range(16): l,r=text[:32], text[32:] lNext=r rNext=des_do_right32(l, r, subKeyArray[i]) text=lNext+rNext file = open("debug.txt", "a") file.write("\n第" + str(i + 1) + "轮输出:\n") file.writelines(str(text)) file.close() # print("第"+str(i+1)+"轮输出:") # print(round[i]) # step3 '''逆初始置换IP-1''' text = text[32:] + text[:32] out = des_do_ip_inverse(text) return out def 
des_do_ip(contentBlock): '''IP置换''' IP = [ 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 ] # content=block_to_content(contentBlock) return [contentBlock[IP[i] - 1] for i in range(64)] def des_do_ip_inverse(contentBlock): '''IP逆置换''' IP_INVERSE = [ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 ] # content=block_to_content(contentBlock) return [contentBlock[IP_INVERSE[i] - 1] for i in range(64)] def hill(): # text = content_to_block_array(padding_content(get_content())) message = "Typora will give you a seamless experience as both a reader and a writer. It removes the preview window, mode switcher, syntax symbols of markdown source code, and all other unnecessary distractions.." 
text = content_to_block_array(padding_content(string_to_ascii_list(message))) print("明文数组") print(text) # 希尔加密 cipher = hill_encrypt_block_array(text, HILL_KEY, 256) cipher = drop_padding(block_array_to_content(cipher)) print("HILL 密文:") # print(cipher) print(ascii_list_to_string(cipher)) # 希尔解解密 cipher = content_to_block_array(padding_content(cipher)) plain = hill_decrypt_block_array(cipher, HILL_KEY_REVERSE, 256) plain = drop_padding(block_array_to_content(plain)) print("HILL 解密文:") # print(plain) print(ascii_list_to_string(plain)) def des(): message = "aaaaaaaa" # message=[15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14] # key=[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]z message = des_string_proc(message) file = open("debug.txt", "a") file.write("DES 文本:\n") file.writelines(str(message)) file.write("DES 加密开始\n") file.close() # DES 加密 cipher = des_encypt_block_array(message, DES_KEY) cipher = des_block_array_to_content(cipher) # print("DES 密文数组:") # print(cipher) print("DES 密文 ASCII:") print(bin_to_string(cipher)) file = open("debug.txt", "a") file.write("\n\nDES 密文数组:\n") file.writelines(str(cipher)) file.write("\n\nDES 解密开始\n") file.close() # DES 解密 cipher = content_to_des_block_array(cipher) plain = des_decrypt_block_array(cipher, DES_KEY) palin = des_block_array_to_content(plain) # print("DES 明文数组:") # print(palin) print("DES 明文 ASCII:") print(bin_to_string(palin)) file = open("debug.txt", "a") file.write("\n\nDES 明文数组:\n") file.writelines(str(palin)) file.close() if __name__ == "__main__": hill() des()
content.append(contentItem)
conditional_block
K2400.py
#!/usr/bin/python # -*- coding: utf-8 -*- import K2400_help import K2400_time import K2400_output import K2400_plot import K2400_transfer import signal import logging import serial import os import time import fcntl import sys import shutil from datetime import datetime # -- new USB_VGS = "/dev/my_USB_K2400_VGS" USB_VDS = "/dev/my_USB_K2400_VDS" USB_PICO = "/dev/my_USB_PICO" PATH = os.path.realpath(__file__) PATH = os.path.splitdrive(PATH)[1] PATH = os.path.dirname(os.path.dirname(PATH)) PATH_LOG = PATH + "/log/" PATH_DATA = PATH + "/data/" FILE_SSHT = PATH + "/wd/shutter.txt" FILE_START = PATH + "/wd/start.txt" FILE_CHANGE = PATH + "/wd/change.txt" FILE_SWITCH = PATH + "/wd/switch.txt" FILE_COMMENTS = PATH + "/wd/comments.txt" FILE_NAME = "TR_TEST" PICNPLC = 1.0 SSHT = 0 FILE = 0 FF = " " PLOT = "" plt=K2400_plot.plot() def sht(): return 0 def sht_change(): return 0 def port_write(ser, cmd): cmd = cmd + "\n" ser.write(cmd) return 0 def port_read(ser): out = '' c = ser.read() while c != '\r': # \n out += c c = ser.read() return out def port_wr(ser, cmd): # port write and read port_write(ser, cmd) return str(port_read(ser)) def get_TUI(ser): return port_wr(ser, "READ?").split(",") def get_PICO(ser): return port_wr(ser, "MEASure?") def set_voltage2(a): return set_voltage(*a) def set_voltage(ser, set_VOLT): port_write(ser, ":SOUR:VOLT:LEV " + str(set_VOLT)) # Set source output level to 10V. return port_wr(ser, "READ?").split(",") # Trigger and acquire one data string. def set_voltage_init(ser, set_VOLT, set_compl, NPLC, DELAY): print "================", set_VOLT logging.info("SET_VOLTAGE_INIT: " + str(ser)) logging.info(str(set_VOLT) + " " + str(set_compl) + " " + str(NPLC) + " " + str(DELAY) + " OUTPUT ON") port_write(ser, ":SOUR:FUNC VOLT") # Select voltage source function. port_write(ser, ":SOUR:VOLT:MODE FIX") # Select fixed voltage source mode. port_write(ser, ":SOUR:VOLT:LEV " + str(float(set_VOLT))) # Set source output level to 10V. 
port_write(ser, ":SOUR:DEL " + str(DELAY)) # Set delay between set volt and measure port_write(ser, ":SENS:FUNC 'CURR'") # Select current measurement function. port_write(ser, ":SENS:CURR:NPLC " + str(NPLC)) # Select current measurement function. if set_compl != 0: port_write(ser, ":SENS:CURR:PROT " + str(set_compl)) # Set compliance limit to 10mA. port_write(ser, ":SENS:CURR:RANG " + str(set_compl)) # Select the 10mA measurement range. else: port_write(ser, ":SENS:CURR:PROT AUTO") # Set compliance limit to auto. port_write(ser, ":SENS:CURR:RANG AUTO") # Select the auto measurement range. port_write(ser, ":OUTP ON") # Turn the output on. def log_reset(): with open(FILE_COMMENTS, "w") as f: f.write("init_log") with open(FILE_SSHT, "w") as f: f.write("0") with open(FILE_CHANGE, "w") as f: f.write("") with open(FILE_SWITCH, 'w') as f: f.write("ON") with open(FILE_START, 'w') as f: f.write("STOP") def log_init(FName): global FILE global FF global PLOT # global plt # if PLOT: # plt=K2400_plot.plot() FName = checkfile(FName) FF = FName logging.info("Create NEW FILE: " + FName) print "=====================================" print FName print "=====================================" with open(PATH + "/log/FName_Last", 'r') as f: LastFName = str(f.readline()) with open(PATH + "/log/FName_Last", 'w') as f: f.write(FName) with open(PATH + "/FName", 'w') as f: f.write(FName) shutil.move(str(PATH) + "/raw.txt", LastFName + ".raw") with open(str(PATH) + "/raw.txt", 'aw+') as the_file: the_file.write("") FILE = open(str(FName), "w") FILE.write("#-------------- Creating on " + str(os.uname()[1]) + " @ " + str(datetime.now()) + "\n") return 0 def log_save(txt): global PLOT #global plt log_save_to_file(txt) log_save_comments() log_save_raw(txt) if PLOT: try: x = float(txt.split()[0]) y = float(txt.split()[1]) plt.plt_update(x, y) except ValueError: pass def log_save_comments(): if os.path.isfile(FILE_COMMENTS) and os.path.getsize(FILE_COMMENTS) > 0: with open(FILE_COMMENTS, "r") as f: 
txt = "#------ comments: " + str(f.readline()).rstrip() with open(FILE_COMMENTS, "w"): pass print txt log_save_to_file(txt) def log_save_to_file(txt): global FILE FILE.write(str(txt) + "\n") def log_save_raw(txt): with open(PATH_LOG + '/raw_all.txt', 'a') as the_file: the_file.write(str(txt) + "\n") with open(PATH + '/raw.txt', 'a') as the_file: the_file.write(str(txt) + "\n") print txt return 0 def checkfile(path): logging.info("CHECK FILE: " + path) path = os.path.expanduser(path) root, ext = os.path.splitext(os.path.expanduser(path)) dir = os.path.dirname(root) fname = os.path.basename(root) candidate = fname + ext index = 1 ls = set(os.listdir(dir)) while candidate in ls: candidate = "{0}_{1:02d}{2}".format(fname, index, ext) index += 1 ffname = os.path.join(dir, candidate) logging.info("CHECK FILE: " + path) logging.info("CHECK FILE will save:" + ffname) return ffname def signal_handler(signal, frame): K2400.log_save("#::::: ABORT ::::::") print "\n:: You pressed Ctrl+C!" print "::Programm will be TERMINATED ... \n . . . . . . . . . ." with open(FILE_SWITCH, 'w+') as the_file: the_file.write("OFF") def init(): global ser_VGS global ser_VDS logging.info("--------------- NEW ------------------") logging.info("INIT - START") logging.info("INIT:: USB_VDS:" + USB_VDS) logging.info("INIT:: USB_VGS:" + USB_VGS) logging.info("INIT:: USB_PIC:" + USB_PICO) ser_VDS = init_port(USB_VDS) ser_VGS = init_port(USB_VGS) ser_PIC = init_port(USB_PICO) logging.info("INIT:: ser_VDS:" + str(ser_VDS)) logging.info("INIT:: ser_VGS:" + str(ser_VGS)) logging.info("INIT:: ser_PIC:" + str(ser_PIC)) init_controler(ser_VDS) init_controler(ser_VGS) init_controler_pic(ser_PIC) logging.info("INIT - END") return ser_VDS, ser_VGS, ser_PIC def
(PortUSB): signal.signal(signal.SIGINT, signal_handler) ser = serial.Serial( port=PortUSB, baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1, xonxoff=0, rtscts=1) if ser.isOpen(): try: fcntl.flock(ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: print ":: K2400 :: Port temporarily unavailable ..... exit" sys.exit() else: print ":: K2400 :: Some problems with Port ...... exit" sys.exit() ser.flushOutput() ser.flushInput() logging.info("INIT_PORT") return ser def init_controler(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, ':ABORt') port_write(ser, "*RST") port_write(ser, "TRIG:CLE") # Clear any pending input triggers port_write(ser, ":SYSTem:TIME:RESet:AUTO 0") # zerujemy zegar port_write(ser, ":SYSTem:LFRequency:AUTO ON") # ustawiamy automatyczny dobor czestotliwosci port_write(ser, ":SYSTem:BEEPer 300, 0.4") port_write(ser, ":FORM:ELEM CURR,TIME") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: promlem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER") return 0 def init_controler_pic(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, "*CLS") port_write(ser, "*RST") port_write(ser, "FORM:ELEM READ,TIME") port_write(ser, ":RANGe:AUTO:ULIMit") port_write(ser, "CURR:NPLC " + str(PICNPLC / 2)) port_write(ser, ":MEASure?") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: problem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER_PIC") return 0 def port_close(ser): print ":: Ports 
close...." for s in ser: logging.info(s) s.close() logging.info("PORT: CLOSE") def format_e(n): a = '%E' % n return a.split('E')[0].rstrip('0').rstrip('.') + 'E' + a.split('E')[1] def switch(): with open(FILE_SWITCH, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "OFF": logging.info("USE SWITCH: OFF") with open(FILE_SWITCH, 'w+') as the_file: the_file.write("ON") return "OFF" else: return "ON" def start(): with open(FILE_START, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "START": logging.info("START measurements freom start funcztion") with open(FILE_START, 'w+') as the_file: the_file.write("STOP") return 1 else: return 0 def change(): VGS = -888 VDS = -888 if os.path.isfile(FILE_CHANGE) and os.path.getsize(FILE_CHANGE) > 0: with open(FILE_CHANGE, 'r') as f: lines = len(f.readlines()) f.seek(0) if lines == 2: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) if line[1].split("=")[0] == "VGS": VGS = float(line[1].split("=")[1]) elif line[1].split("=")[0] == "VDS": VDS = float(line[1].split("=")[1]) else: return False, 0, 0 if lines == 1: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) else: return False, 0, 0 with open(FILE_CHANGE, "w"): pass txt = "#------ comments: Change VGS or VDS: " + str(line) log_save(txt) # print txt return "True", VGS, VDS else: return "False", VGS, VDS def file_header(ser_VGS, ser_VDS): logging.info("CREATE FILE HEADER") return 0 def file_footer(ser_VGS, ser_VDS): logging.info("CREATE FILE FOOTER") global File off(ser_VGS) off(ser_VDS) with open(FILE_SSHT, 'w+') as the_file: the_file.write("OFF") sht() log_save("#\n#----------------------- FOOTER ------------------------------------") log_save("# DEV_1 VDS: " + " " + port_wr(ser_VDS, '*IDN?')) log_save("# DEV_1 VDS: " + 
str(ser_VDS)) log_save("# DEV_2 VGS: " + " " + port_wr(ser_VGS, '*IDN?')) log_save("# DEV_1 VDS: " + str(ser_VGS)) log_save("#") log_save(port_error(ser_VDS, "VDS")) log_save(port_error(ser_VGS, "VGS")) log_save("#") log_save("# ----* zajaczkowksi@mpip-mainz.mpg.de *-------------------------------") log_save("# ----- End of file " + str(datetime.now()) + " created by:" + str(__file__)) global FF print "=====================================" print FF print "=====================================" splt() return 0 def splt(): global PLOT if PLOT: try: plt.save_png(FF+".png") except ValueError: print "Porblem z zapisem obrazka :( " pass def off(ser): logging.info("SENS OFF [:OUTP OFF]") port_write(ser, ":OUTP OFF") def port_error(ser, cmd): logging.info("READ ERRORS") err = [":STAT:MEAS? ", ":SYST:ERR:ALL?", "*ESR? ", "*OPC? ", ":STAT:OPER? ", ":STAT:MEAS? ", ":STAT:QUES? "] out = "# ======= ERROR'S @ " + str(cmd) + "\n#" for x in err: out += "# " + str(x) + " \t " + str(port_wr(ser, x)) + " \n#" # print out return out # ============================= MAIN if __name__ == '__main__': args = K2400_help.help() log_reset() print args.test # ------- podstawowe parametry: if args.filename != "tr00": FILE_NAME = args.filename if args.shutter: print "SHUTTER WILL BE OPEN" with open(FILE_SSHT, 'w+') as the_file: the_file.write("ON " + str(args.shutter)) if args.out: # ------- OUTPUT: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS_start = float(args.out[0][0]) VDS_stop = float(args.out[0][1]) VDS_step = float(args.out[0][2]) VDS_comp = float(args.DCOMP) VGS_start = float(args.out[0][3]) VGS_stop = float(args.out[0][4]) VGS_step = float(args.out[0][5]) VGS_comp = float(args.GCOMP) NPLC = float(args.NPLC) PICNPLC = NPLC DEL = float(args.DEL) SWEEP = True PLOT = args.fig ser = init() output_param = [FName, VDS_start, VDS_stop, VDS_step, VDS_comp, VGS_start, VGS_stop, VGS_step, VGS_comp, NPLC, DEL, SWEEP] K2400_output.output_steps(ser, *output_param) port_close(ser) if 
args.timee: # ------- TIME: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = float(args.timee[0][0]) VGS = float(args.timee[0][1]) VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL sht = args.shutter ww = args.waitstart PLOT = args.fig ser = init() param_timesteps = [FName, VDS, VDS_comp, VGS, VGS_comp, NPLC, DEL, sht, ww] K2400_time.time_steps(ser, *param_timesteps) port_close(ser) if args.trans: # ------- TRANSFER: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = args.trans[0][0] VGS_start = args.trans[0][1] VGS_stop = args.trans[0][2] VGS_step = args.trans[0][3] VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL SWEEP = args.sweep ttime = args.wait sht = args.shutter PLOT = args.fig param_transfer = [FName, VDS, VGS_start, VGS_stop, VGS_step, VDS_comp, VGS_comp, NPLC, DEL, SWEEP, ttime, sht] ser = init() K2400_transfer.transfer_steps(ser, *param_transfer) port_close(ser) sys.exit() if args.test: data=PATH_DATA+"test.txt" print PLOT log_init(data) PLOT = args.fig t0=time.time() x=-20 while True: x+=0.5 y=2*x*x-2*x+1 txt=str(x)+" "+str(y) log_save(txt) t1=time.time() print t1-t0 if x>=20: break splt()
init_port
identifier_name
K2400.py
#!/usr/bin/python # -*- coding: utf-8 -*- import K2400_help import K2400_time import K2400_output import K2400_plot import K2400_transfer import signal import logging import serial import os import time import fcntl import sys import shutil from datetime import datetime # -- new USB_VGS = "/dev/my_USB_K2400_VGS" USB_VDS = "/dev/my_USB_K2400_VDS" USB_PICO = "/dev/my_USB_PICO" PATH = os.path.realpath(__file__) PATH = os.path.splitdrive(PATH)[1] PATH = os.path.dirname(os.path.dirname(PATH)) PATH_LOG = PATH + "/log/" PATH_DATA = PATH + "/data/" FILE_SSHT = PATH + "/wd/shutter.txt" FILE_START = PATH + "/wd/start.txt" FILE_CHANGE = PATH + "/wd/change.txt" FILE_SWITCH = PATH + "/wd/switch.txt" FILE_COMMENTS = PATH + "/wd/comments.txt" FILE_NAME = "TR_TEST" PICNPLC = 1.0 SSHT = 0 FILE = 0 FF = " " PLOT = "" plt=K2400_plot.plot() def sht(): return 0 def sht_change(): return 0 def port_write(ser, cmd): cmd = cmd + "\n" ser.write(cmd) return 0 def port_read(ser): out = '' c = ser.read() while c != '\r': # \n out += c c = ser.read() return out def port_wr(ser, cmd): # port write and read port_write(ser, cmd) return str(port_read(ser)) def get_TUI(ser): return port_wr(ser, "READ?").split(",") def get_PICO(ser): return port_wr(ser, "MEASure?") def set_voltage2(a): return set_voltage(*a) def set_voltage(ser, set_VOLT): port_write(ser, ":SOUR:VOLT:LEV " + str(set_VOLT)) # Set source output level to 10V. return port_wr(ser, "READ?").split(",") # Trigger and acquire one data string. def set_voltage_init(ser, set_VOLT, set_compl, NPLC, DELAY): print "================", set_VOLT logging.info("SET_VOLTAGE_INIT: " + str(ser)) logging.info(str(set_VOLT) + " " + str(set_compl) + " " + str(NPLC) + " " + str(DELAY) + " OUTPUT ON") port_write(ser, ":SOUR:FUNC VOLT") # Select voltage source function. port_write(ser, ":SOUR:VOLT:MODE FIX") # Select fixed voltage source mode. port_write(ser, ":SOUR:VOLT:LEV " + str(float(set_VOLT))) # Set source output level to 10V. 
port_write(ser, ":SOUR:DEL " + str(DELAY)) # Set delay between set volt and measure port_write(ser, ":SENS:FUNC 'CURR'") # Select current measurement function. port_write(ser, ":SENS:CURR:NPLC " + str(NPLC)) # Select current measurement function. if set_compl != 0: port_write(ser, ":SENS:CURR:PROT " + str(set_compl)) # Set compliance limit to 10mA. port_write(ser, ":SENS:CURR:RANG " + str(set_compl)) # Select the 10mA measurement range. else: port_write(ser, ":SENS:CURR:PROT AUTO") # Set compliance limit to auto. port_write(ser, ":SENS:CURR:RANG AUTO") # Select the auto measurement range. port_write(ser, ":OUTP ON") # Turn the output on. def log_reset(): with open(FILE_COMMENTS, "w") as f: f.write("init_log") with open(FILE_SSHT, "w") as f: f.write("0") with open(FILE_CHANGE, "w") as f: f.write("") with open(FILE_SWITCH, 'w') as f: f.write("ON") with open(FILE_START, 'w') as f: f.write("STOP") def log_init(FName): global FILE global FF global PLOT # global plt # if PLOT: # plt=K2400_plot.plot() FName = checkfile(FName) FF = FName logging.info("Create NEW FILE: " + FName) print "=====================================" print FName print "=====================================" with open(PATH + "/log/FName_Last", 'r') as f: LastFName = str(f.readline()) with open(PATH + "/log/FName_Last", 'w') as f: f.write(FName) with open(PATH + "/FName", 'w') as f: f.write(FName) shutil.move(str(PATH) + "/raw.txt", LastFName + ".raw") with open(str(PATH) + "/raw.txt", 'aw+') as the_file: the_file.write("") FILE = open(str(FName), "w") FILE.write("#-------------- Creating on " + str(os.uname()[1]) + " @ " + str(datetime.now()) + "\n") return 0 def log_save(txt): global PLOT #global plt log_save_to_file(txt) log_save_comments() log_save_raw(txt) if PLOT: try: x = float(txt.split()[0]) y = float(txt.split()[1]) plt.plt_update(x, y) except ValueError: pass def log_save_comments(): if os.path.isfile(FILE_COMMENTS) and os.path.getsize(FILE_COMMENTS) > 0: with open(FILE_COMMENTS, "r") as f: 
txt = "#------ comments: " + str(f.readline()).rstrip() with open(FILE_COMMENTS, "w"): pass print txt log_save_to_file(txt) def log_save_to_file(txt): global FILE FILE.write(str(txt) + "\n") def log_save_raw(txt): with open(PATH_LOG + '/raw_all.txt', 'a') as the_file: the_file.write(str(txt) + "\n") with open(PATH + '/raw.txt', 'a') as the_file: the_file.write(str(txt) + "\n") print txt return 0 def checkfile(path): logging.info("CHECK FILE: " + path) path = os.path.expanduser(path) root, ext = os.path.splitext(os.path.expanduser(path)) dir = os.path.dirname(root) fname = os.path.basename(root) candidate = fname + ext index = 1 ls = set(os.listdir(dir)) while candidate in ls: candidate = "{0}_{1:02d}{2}".format(fname, index, ext) index += 1 ffname = os.path.join(dir, candidate) logging.info("CHECK FILE: " + path) logging.info("CHECK FILE will save:" + ffname) return ffname def signal_handler(signal, frame): K2400.log_save("#::::: ABORT ::::::") print "\n:: You pressed Ctrl+C!" print "::Programm will be TERMINATED ... \n . . . . . . . . . ." 
with open(FILE_SWITCH, 'w+') as the_file: the_file.write("OFF") def init(): global ser_VGS global ser_VDS logging.info("--------------- NEW ------------------") logging.info("INIT - START") logging.info("INIT:: USB_VDS:" + USB_VDS) logging.info("INIT:: USB_VGS:" + USB_VGS) logging.info("INIT:: USB_PIC:" + USB_PICO) ser_VDS = init_port(USB_VDS) ser_VGS = init_port(USB_VGS) ser_PIC = init_port(USB_PICO) logging.info("INIT:: ser_VDS:" + str(ser_VDS)) logging.info("INIT:: ser_VGS:" + str(ser_VGS)) logging.info("INIT:: ser_PIC:" + str(ser_PIC)) init_controler(ser_VDS) init_controler(ser_VGS) init_controler_pic(ser_PIC) logging.info("INIT - END") return ser_VDS, ser_VGS, ser_PIC def init_port(PortUSB): signal.signal(signal.SIGINT, signal_handler) ser = serial.Serial( port=PortUSB, baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1, xonxoff=0, rtscts=1) if ser.isOpen(): try: fcntl.flock(ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: print ":: K2400 :: Port temporarily unavailable ..... exit" sys.exit() else: print ":: K2400 :: Some problems with Port ...... 
exit" sys.exit() ser.flushOutput() ser.flushInput() logging.info("INIT_PORT") return ser def init_controler(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, ':ABORt') port_write(ser, "*RST") port_write(ser, "TRIG:CLE") # Clear any pending input triggers port_write(ser, ":SYSTem:TIME:RESet:AUTO 0") # zerujemy zegar port_write(ser, ":SYSTem:LFRequency:AUTO ON") # ustawiamy automatyczny dobor czestotliwosci port_write(ser, ":SYSTem:BEEPer 300, 0.4") port_write(ser, ":FORM:ELEM CURR,TIME") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: promlem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER") return 0 def init_controler_pic(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, "*CLS") port_write(ser, "*RST") port_write(ser, "FORM:ELEM READ,TIME") port_write(ser, ":RANGe:AUTO:ULIMit") port_write(ser, "CURR:NPLC " + str(PICNPLC / 2)) port_write(ser, ":MEASure?") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: problem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER_PIC") return 0 def port_close(ser): print ":: Ports close...." 
for s in ser: logging.info(s) s.close() logging.info("PORT: CLOSE") def format_e(n): a = '%E' % n return a.split('E')[0].rstrip('0').rstrip('.') + 'E' + a.split('E')[1] def switch(): with open(FILE_SWITCH, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "OFF": logging.info("USE SWITCH: OFF") with open(FILE_SWITCH, 'w+') as the_file: the_file.write("ON") return "OFF" else: return "ON" def start(): with open(FILE_START, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "START": logging.info("START measurements freom start funcztion") with open(FILE_START, 'w+') as the_file: the_file.write("STOP") return 1 else: return 0 def change(): VGS = -888 VDS = -888 if os.path.isfile(FILE_CHANGE) and os.path.getsize(FILE_CHANGE) > 0: with open(FILE_CHANGE, 'r') as f: lines = len(f.readlines()) f.seek(0) if lines == 2: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) if line[1].split("=")[0] == "VGS": VGS = float(line[1].split("=")[1]) elif line[1].split("=")[0] == "VDS": VDS = float(line[1].split("=")[1]) else: return False, 0, 0 if lines == 1: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) else: return False, 0, 0 with open(FILE_CHANGE, "w"): pass txt = "#------ comments: Change VGS or VDS: " + str(line) log_save(txt) # print txt return "True", VGS, VDS else: return "False", VGS, VDS def file_header(ser_VGS, ser_VDS): logging.info("CREATE FILE HEADER") return 0 def file_footer(ser_VGS, ser_VDS): logging.info("CREATE FILE FOOTER") global File off(ser_VGS) off(ser_VDS) with open(FILE_SSHT, 'w+') as the_file: the_file.write("OFF") sht() log_save("#\n#----------------------- FOOTER ------------------------------------") log_save("# DEV_1 VDS: " + " " + port_wr(ser_VDS, '*IDN?')) log_save("# DEV_1 VDS: " + 
str(ser_VDS)) log_save("# DEV_2 VGS: " + " " + port_wr(ser_VGS, '*IDN?')) log_save("# DEV_1 VDS: " + str(ser_VGS)) log_save("#") log_save(port_error(ser_VDS, "VDS")) log_save(port_error(ser_VGS, "VGS")) log_save("#") log_save("# ----* zajaczkowksi@mpip-mainz.mpg.de *-------------------------------") log_save("# ----- End of file " + str(datetime.now()) + " created by:" + str(__file__)) global FF print "=====================================" print FF print "=====================================" splt() return 0 def splt(): global PLOT if PLOT: try: plt.save_png(FF+".png") except ValueError: print "Porblem z zapisem obrazka :( " pass def off(ser): logging.info("SENS OFF [:OUTP OFF]") port_write(ser, ":OUTP OFF") def port_error(ser, cmd): logging.info("READ ERRORS") err = [":STAT:MEAS? ", ":SYST:ERR:ALL?", "*ESR? ", "*OPC? ", ":STAT:OPER? ", ":STAT:MEAS? ", ":STAT:QUES? "] out = "# ======= ERROR'S @ " + str(cmd) + "\n#" for x in err: out += "# " + str(x) + " \t " + str(port_wr(ser, x)) + " \n#" # print out return out # ============================= MAIN if __name__ == '__main__': args = K2400_help.help() log_reset() print args.test # ------- podstawowe parametry: if args.filename != "tr00": FILE_NAME = args.filename if args.shutter: print "SHUTTER WILL BE OPEN" with open(FILE_SSHT, 'w+') as the_file: the_file.write("ON " + str(args.shutter)) if args.out: # ------- OUTPUT: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS_start = float(args.out[0][0]) VDS_stop = float(args.out[0][1]) VDS_step = float(args.out[0][2]) VDS_comp = float(args.DCOMP) VGS_start = float(args.out[0][3]) VGS_stop = float(args.out[0][4]) VGS_step = float(args.out[0][5]) VGS_comp = float(args.GCOMP) NPLC = float(args.NPLC) PICNPLC = NPLC DEL = float(args.DEL) SWEEP = True PLOT = args.fig ser = init() output_param = [FName, VDS_start, VDS_stop, VDS_step, VDS_comp, VGS_start, VGS_stop, VGS_step, VGS_comp, NPLC, DEL, SWEEP] K2400_output.output_steps(ser, *output_param) port_close(ser) if 
args.timee: # ------- TIME: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = float(args.timee[0][0]) VGS = float(args.timee[0][1]) VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL sht = args.shutter ww = args.waitstart PLOT = args.fig ser = init() param_timesteps = [FName, VDS, VDS_comp, VGS, VGS_comp, NPLC, DEL, sht, ww] K2400_time.time_steps(ser, *param_timesteps) port_close(ser) if args.trans: # ------- TRANSFER: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = args.trans[0][0] VGS_start = args.trans[0][1] VGS_stop = args.trans[0][2] VGS_step = args.trans[0][3] VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL SWEEP = args.sweep ttime = args.wait sht = args.shutter PLOT = args.fig param_transfer = [FName, VDS, VGS_start, VGS_stop, VGS_step, VDS_comp, VGS_comp, NPLC, DEL, SWEEP, ttime, sht] ser = init() K2400_transfer.transfer_steps(ser, *param_transfer) port_close(ser) sys.exit() if args.test: data=PATH_DATA+"test.txt" print PLOT log_init(data) PLOT = args.fig t0=time.time() x=-20 while True:
splt()
x+=0.5 y=2*x*x-2*x+1 txt=str(x)+" "+str(y) log_save(txt) t1=time.time() print t1-t0 if x>=20: break
conditional_block
K2400.py
#!/usr/bin/python # -*- coding: utf-8 -*- import K2400_help import K2400_time import K2400_output import K2400_plot import K2400_transfer import signal import logging import serial import os import time import fcntl import sys import shutil from datetime import datetime # -- new USB_VGS = "/dev/my_USB_K2400_VGS" USB_VDS = "/dev/my_USB_K2400_VDS" USB_PICO = "/dev/my_USB_PICO" PATH = os.path.realpath(__file__) PATH = os.path.splitdrive(PATH)[1] PATH = os.path.dirname(os.path.dirname(PATH)) PATH_LOG = PATH + "/log/" PATH_DATA = PATH + "/data/" FILE_SSHT = PATH + "/wd/shutter.txt" FILE_START = PATH + "/wd/start.txt" FILE_CHANGE = PATH + "/wd/change.txt" FILE_SWITCH = PATH + "/wd/switch.txt" FILE_COMMENTS = PATH + "/wd/comments.txt" FILE_NAME = "TR_TEST" PICNPLC = 1.0 SSHT = 0 FILE = 0 FF = " " PLOT = "" plt=K2400_plot.plot() def sht(): return 0 def sht_change(): return 0 def port_write(ser, cmd): cmd = cmd + "\n" ser.write(cmd) return 0 def port_read(ser): out = '' c = ser.read() while c != '\r': # \n out += c c = ser.read() return out def port_wr(ser, cmd): # port write and read port_write(ser, cmd) return str(port_read(ser)) def get_TUI(ser): return port_wr(ser, "READ?").split(",") def get_PICO(ser): return port_wr(ser, "MEASure?") def set_voltage2(a): return set_voltage(*a) def set_voltage(ser, set_VOLT): port_write(ser, ":SOUR:VOLT:LEV " + str(set_VOLT)) # Set source output level to 10V. return port_wr(ser, "READ?").split(",") # Trigger and acquire one data string. def set_voltage_init(ser, set_VOLT, set_compl, NPLC, DELAY): print "================", set_VOLT logging.info("SET_VOLTAGE_INIT: " + str(ser)) logging.info(str(set_VOLT) + " " + str(set_compl) + " " + str(NPLC) + " " + str(DELAY) + " OUTPUT ON") port_write(ser, ":SOUR:FUNC VOLT") # Select voltage source function. port_write(ser, ":SOUR:VOLT:MODE FIX") # Select fixed voltage source mode. port_write(ser, ":SOUR:VOLT:LEV " + str(float(set_VOLT))) # Set source output level to 10V. 
port_write(ser, ":SOUR:DEL " + str(DELAY)) # Set delay between set volt and measure port_write(ser, ":SENS:FUNC 'CURR'") # Select current measurement function. port_write(ser, ":SENS:CURR:NPLC " + str(NPLC)) # Select current measurement function. if set_compl != 0: port_write(ser, ":SENS:CURR:PROT " + str(set_compl)) # Set compliance limit to 10mA. port_write(ser, ":SENS:CURR:RANG " + str(set_compl)) # Select the 10mA measurement range. else: port_write(ser, ":SENS:CURR:PROT AUTO") # Set compliance limit to auto. port_write(ser, ":SENS:CURR:RANG AUTO") # Select the auto measurement range. port_write(ser, ":OUTP ON") # Turn the output on. def log_reset(): with open(FILE_COMMENTS, "w") as f: f.write("init_log") with open(FILE_SSHT, "w") as f: f.write("0") with open(FILE_CHANGE, "w") as f: f.write("") with open(FILE_SWITCH, 'w') as f: f.write("ON") with open(FILE_START, 'w') as f: f.write("STOP") def log_init(FName): global FILE global FF global PLOT # global plt # if PLOT: # plt=K2400_plot.plot() FName = checkfile(FName) FF = FName logging.info("Create NEW FILE: " + FName)
with open(PATH + "/log/FName_Last", 'r') as f: LastFName = str(f.readline()) with open(PATH + "/log/FName_Last", 'w') as f: f.write(FName) with open(PATH + "/FName", 'w') as f: f.write(FName) shutil.move(str(PATH) + "/raw.txt", LastFName + ".raw") with open(str(PATH) + "/raw.txt", 'aw+') as the_file: the_file.write("") FILE = open(str(FName), "w") FILE.write("#-------------- Creating on " + str(os.uname()[1]) + " @ " + str(datetime.now()) + "\n") return 0 def log_save(txt): global PLOT #global plt log_save_to_file(txt) log_save_comments() log_save_raw(txt) if PLOT: try: x = float(txt.split()[0]) y = float(txt.split()[1]) plt.plt_update(x, y) except ValueError: pass def log_save_comments(): if os.path.isfile(FILE_COMMENTS) and os.path.getsize(FILE_COMMENTS) > 0: with open(FILE_COMMENTS, "r") as f: txt = "#------ comments: " + str(f.readline()).rstrip() with open(FILE_COMMENTS, "w"): pass print txt log_save_to_file(txt) def log_save_to_file(txt): global FILE FILE.write(str(txt) + "\n") def log_save_raw(txt): with open(PATH_LOG + '/raw_all.txt', 'a') as the_file: the_file.write(str(txt) + "\n") with open(PATH + '/raw.txt', 'a') as the_file: the_file.write(str(txt) + "\n") print txt return 0 def checkfile(path): logging.info("CHECK FILE: " + path) path = os.path.expanduser(path) root, ext = os.path.splitext(os.path.expanduser(path)) dir = os.path.dirname(root) fname = os.path.basename(root) candidate = fname + ext index = 1 ls = set(os.listdir(dir)) while candidate in ls: candidate = "{0}_{1:02d}{2}".format(fname, index, ext) index += 1 ffname = os.path.join(dir, candidate) logging.info("CHECK FILE: " + path) logging.info("CHECK FILE will save:" + ffname) return ffname def signal_handler(signal, frame): K2400.log_save("#::::: ABORT ::::::") print "\n:: You pressed Ctrl+C!" print "::Programm will be TERMINATED ... \n . . . . . . . . . ." 
with open(FILE_SWITCH, 'w+') as the_file: the_file.write("OFF") def init(): global ser_VGS global ser_VDS logging.info("--------------- NEW ------------------") logging.info("INIT - START") logging.info("INIT:: USB_VDS:" + USB_VDS) logging.info("INIT:: USB_VGS:" + USB_VGS) logging.info("INIT:: USB_PIC:" + USB_PICO) ser_VDS = init_port(USB_VDS) ser_VGS = init_port(USB_VGS) ser_PIC = init_port(USB_PICO) logging.info("INIT:: ser_VDS:" + str(ser_VDS)) logging.info("INIT:: ser_VGS:" + str(ser_VGS)) logging.info("INIT:: ser_PIC:" + str(ser_PIC)) init_controler(ser_VDS) init_controler(ser_VGS) init_controler_pic(ser_PIC) logging.info("INIT - END") return ser_VDS, ser_VGS, ser_PIC def init_port(PortUSB): signal.signal(signal.SIGINT, signal_handler) ser = serial.Serial( port=PortUSB, baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1, xonxoff=0, rtscts=1) if ser.isOpen(): try: fcntl.flock(ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: print ":: K2400 :: Port temporarily unavailable ..... exit" sys.exit() else: print ":: K2400 :: Some problems with Port ...... 
exit" sys.exit() ser.flushOutput() ser.flushInput() logging.info("INIT_PORT") return ser def init_controler(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, ':ABORt') port_write(ser, "*RST") port_write(ser, "TRIG:CLE") # Clear any pending input triggers port_write(ser, ":SYSTem:TIME:RESet:AUTO 0") # zerujemy zegar port_write(ser, ":SYSTem:LFRequency:AUTO ON") # ustawiamy automatyczny dobor czestotliwosci port_write(ser, ":SYSTem:BEEPer 300, 0.4") port_write(ser, ":FORM:ELEM CURR,TIME") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: promlem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER") return 0 def init_controler_pic(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, "*CLS") port_write(ser, "*RST") port_write(ser, "FORM:ELEM READ,TIME") port_write(ser, ":RANGe:AUTO:ULIMit") port_write(ser, "CURR:NPLC " + str(PICNPLC / 2)) port_write(ser, ":MEASure?") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: problem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER_PIC") return 0 def port_close(ser): print ":: Ports close...." 
for s in ser: logging.info(s) s.close() logging.info("PORT: CLOSE") def format_e(n): a = '%E' % n return a.split('E')[0].rstrip('0').rstrip('.') + 'E' + a.split('E')[1] def switch(): with open(FILE_SWITCH, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "OFF": logging.info("USE SWITCH: OFF") with open(FILE_SWITCH, 'w+') as the_file: the_file.write("ON") return "OFF" else: return "ON" def start(): with open(FILE_START, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "START": logging.info("START measurements freom start funcztion") with open(FILE_START, 'w+') as the_file: the_file.write("STOP") return 1 else: return 0 def change(): VGS = -888 VDS = -888 if os.path.isfile(FILE_CHANGE) and os.path.getsize(FILE_CHANGE) > 0: with open(FILE_CHANGE, 'r') as f: lines = len(f.readlines()) f.seek(0) if lines == 2: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) if line[1].split("=")[0] == "VGS": VGS = float(line[1].split("=")[1]) elif line[1].split("=")[0] == "VDS": VDS = float(line[1].split("=")[1]) else: return False, 0, 0 if lines == 1: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) else: return False, 0, 0 with open(FILE_CHANGE, "w"): pass txt = "#------ comments: Change VGS or VDS: " + str(line) log_save(txt) # print txt return "True", VGS, VDS else: return "False", VGS, VDS def file_header(ser_VGS, ser_VDS): logging.info("CREATE FILE HEADER") return 0 def file_footer(ser_VGS, ser_VDS): logging.info("CREATE FILE FOOTER") global File off(ser_VGS) off(ser_VDS) with open(FILE_SSHT, 'w+') as the_file: the_file.write("OFF") sht() log_save("#\n#----------------------- FOOTER ------------------------------------") log_save("# DEV_1 VDS: " + " " + port_wr(ser_VDS, '*IDN?')) log_save("# DEV_1 VDS: " + 
str(ser_VDS)) log_save("# DEV_2 VGS: " + " " + port_wr(ser_VGS, '*IDN?')) log_save("# DEV_1 VDS: " + str(ser_VGS)) log_save("#") log_save(port_error(ser_VDS, "VDS")) log_save(port_error(ser_VGS, "VGS")) log_save("#") log_save("# ----* zajaczkowksi@mpip-mainz.mpg.de *-------------------------------") log_save("# ----- End of file " + str(datetime.now()) + " created by:" + str(__file__)) global FF print "=====================================" print FF print "=====================================" splt() return 0 def splt(): global PLOT if PLOT: try: plt.save_png(FF+".png") except ValueError: print "Porblem z zapisem obrazka :( " pass def off(ser): logging.info("SENS OFF [:OUTP OFF]") port_write(ser, ":OUTP OFF") def port_error(ser, cmd): logging.info("READ ERRORS") err = [":STAT:MEAS? ", ":SYST:ERR:ALL?", "*ESR? ", "*OPC? ", ":STAT:OPER? ", ":STAT:MEAS? ", ":STAT:QUES? "] out = "# ======= ERROR'S @ " + str(cmd) + "\n#" for x in err: out += "# " + str(x) + " \t " + str(port_wr(ser, x)) + " \n#" # print out return out # ============================= MAIN if __name__ == '__main__': args = K2400_help.help() log_reset() print args.test # ------- podstawowe parametry: if args.filename != "tr00": FILE_NAME = args.filename if args.shutter: print "SHUTTER WILL BE OPEN" with open(FILE_SSHT, 'w+') as the_file: the_file.write("ON " + str(args.shutter)) if args.out: # ------- OUTPUT: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS_start = float(args.out[0][0]) VDS_stop = float(args.out[0][1]) VDS_step = float(args.out[0][2]) VDS_comp = float(args.DCOMP) VGS_start = float(args.out[0][3]) VGS_stop = float(args.out[0][4]) VGS_step = float(args.out[0][5]) VGS_comp = float(args.GCOMP) NPLC = float(args.NPLC) PICNPLC = NPLC DEL = float(args.DEL) SWEEP = True PLOT = args.fig ser = init() output_param = [FName, VDS_start, VDS_stop, VDS_step, VDS_comp, VGS_start, VGS_stop, VGS_step, VGS_comp, NPLC, DEL, SWEEP] K2400_output.output_steps(ser, *output_param) port_close(ser) if 
args.timee: # ------- TIME: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = float(args.timee[0][0]) VGS = float(args.timee[0][1]) VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL sht = args.shutter ww = args.waitstart PLOT = args.fig ser = init() param_timesteps = [FName, VDS, VDS_comp, VGS, VGS_comp, NPLC, DEL, sht, ww] K2400_time.time_steps(ser, *param_timesteps) port_close(ser) if args.trans: # ------- TRANSFER: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = args.trans[0][0] VGS_start = args.trans[0][1] VGS_stop = args.trans[0][2] VGS_step = args.trans[0][3] VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL SWEEP = args.sweep ttime = args.wait sht = args.shutter PLOT = args.fig param_transfer = [FName, VDS, VGS_start, VGS_stop, VGS_step, VDS_comp, VGS_comp, NPLC, DEL, SWEEP, ttime, sht] ser = init() K2400_transfer.transfer_steps(ser, *param_transfer) port_close(ser) sys.exit() if args.test: data=PATH_DATA+"test.txt" print PLOT log_init(data) PLOT = args.fig t0=time.time() x=-20 while True: x+=0.5 y=2*x*x-2*x+1 txt=str(x)+" "+str(y) log_save(txt) t1=time.time() print t1-t0 if x>=20: break splt()
print "=====================================" print FName print "====================================="
random_line_split
K2400.py
#!/usr/bin/python # -*- coding: utf-8 -*- import K2400_help import K2400_time import K2400_output import K2400_plot import K2400_transfer import signal import logging import serial import os import time import fcntl import sys import shutil from datetime import datetime # -- new USB_VGS = "/dev/my_USB_K2400_VGS" USB_VDS = "/dev/my_USB_K2400_VDS" USB_PICO = "/dev/my_USB_PICO" PATH = os.path.realpath(__file__) PATH = os.path.splitdrive(PATH)[1] PATH = os.path.dirname(os.path.dirname(PATH)) PATH_LOG = PATH + "/log/" PATH_DATA = PATH + "/data/" FILE_SSHT = PATH + "/wd/shutter.txt" FILE_START = PATH + "/wd/start.txt" FILE_CHANGE = PATH + "/wd/change.txt" FILE_SWITCH = PATH + "/wd/switch.txt" FILE_COMMENTS = PATH + "/wd/comments.txt" FILE_NAME = "TR_TEST" PICNPLC = 1.0 SSHT = 0 FILE = 0 FF = " " PLOT = "" plt=K2400_plot.plot() def sht(): return 0 def sht_change(): return 0 def port_write(ser, cmd): cmd = cmd + "\n" ser.write(cmd) return 0 def port_read(ser): out = '' c = ser.read() while c != '\r': # \n out += c c = ser.read() return out def port_wr(ser, cmd): # port write and read port_write(ser, cmd) return str(port_read(ser)) def get_TUI(ser): return port_wr(ser, "READ?").split(",") def get_PICO(ser): return port_wr(ser, "MEASure?") def set_voltage2(a): return set_voltage(*a) def set_voltage(ser, set_VOLT): port_write(ser, ":SOUR:VOLT:LEV " + str(set_VOLT)) # Set source output level to 10V. return port_wr(ser, "READ?").split(",") # Trigger and acquire one data string. def set_voltage_init(ser, set_VOLT, set_compl, NPLC, DELAY): print "================", set_VOLT logging.info("SET_VOLTAGE_INIT: " + str(ser)) logging.info(str(set_VOLT) + " " + str(set_compl) + " " + str(NPLC) + " " + str(DELAY) + " OUTPUT ON") port_write(ser, ":SOUR:FUNC VOLT") # Select voltage source function. port_write(ser, ":SOUR:VOLT:MODE FIX") # Select fixed voltage source mode. port_write(ser, ":SOUR:VOLT:LEV " + str(float(set_VOLT))) # Set source output level to 10V. 
port_write(ser, ":SOUR:DEL " + str(DELAY)) # Set delay between set volt and measure port_write(ser, ":SENS:FUNC 'CURR'") # Select current measurement function. port_write(ser, ":SENS:CURR:NPLC " + str(NPLC)) # Select current measurement function. if set_compl != 0: port_write(ser, ":SENS:CURR:PROT " + str(set_compl)) # Set compliance limit to 10mA. port_write(ser, ":SENS:CURR:RANG " + str(set_compl)) # Select the 10mA measurement range. else: port_write(ser, ":SENS:CURR:PROT AUTO") # Set compliance limit to auto. port_write(ser, ":SENS:CURR:RANG AUTO") # Select the auto measurement range. port_write(ser, ":OUTP ON") # Turn the output on. def log_reset(): with open(FILE_COMMENTS, "w") as f: f.write("init_log") with open(FILE_SSHT, "w") as f: f.write("0") with open(FILE_CHANGE, "w") as f: f.write("") with open(FILE_SWITCH, 'w') as f: f.write("ON") with open(FILE_START, 'w') as f: f.write("STOP") def log_init(FName): global FILE global FF global PLOT # global plt # if PLOT: # plt=K2400_plot.plot() FName = checkfile(FName) FF = FName logging.info("Create NEW FILE: " + FName) print "=====================================" print FName print "=====================================" with open(PATH + "/log/FName_Last", 'r') as f: LastFName = str(f.readline()) with open(PATH + "/log/FName_Last", 'w') as f: f.write(FName) with open(PATH + "/FName", 'w') as f: f.write(FName) shutil.move(str(PATH) + "/raw.txt", LastFName + ".raw") with open(str(PATH) + "/raw.txt", 'aw+') as the_file: the_file.write("") FILE = open(str(FName), "w") FILE.write("#-------------- Creating on " + str(os.uname()[1]) + " @ " + str(datetime.now()) + "\n") return 0 def log_save(txt): global PLOT #global plt log_save_to_file(txt) log_save_comments() log_save_raw(txt) if PLOT: try: x = float(txt.split()[0]) y = float(txt.split()[1]) plt.plt_update(x, y) except ValueError: pass def log_save_comments(): if os.path.isfile(FILE_COMMENTS) and os.path.getsize(FILE_COMMENTS) > 0: with open(FILE_COMMENTS, "r") as f: 
txt = "#------ comments: " + str(f.readline()).rstrip() with open(FILE_COMMENTS, "w"): pass print txt log_save_to_file(txt) def log_save_to_file(txt): global FILE FILE.write(str(txt) + "\n") def log_save_raw(txt): with open(PATH_LOG + '/raw_all.txt', 'a') as the_file: the_file.write(str(txt) + "\n") with open(PATH + '/raw.txt', 'a') as the_file: the_file.write(str(txt) + "\n") print txt return 0 def checkfile(path):
def signal_handler(signal, frame): K2400.log_save("#::::: ABORT ::::::") print "\n:: You pressed Ctrl+C!" print "::Programm will be TERMINATED ... \n . . . . . . . . . ." with open(FILE_SWITCH, 'w+') as the_file: the_file.write("OFF") def init(): global ser_VGS global ser_VDS logging.info("--------------- NEW ------------------") logging.info("INIT - START") logging.info("INIT:: USB_VDS:" + USB_VDS) logging.info("INIT:: USB_VGS:" + USB_VGS) logging.info("INIT:: USB_PIC:" + USB_PICO) ser_VDS = init_port(USB_VDS) ser_VGS = init_port(USB_VGS) ser_PIC = init_port(USB_PICO) logging.info("INIT:: ser_VDS:" + str(ser_VDS)) logging.info("INIT:: ser_VGS:" + str(ser_VGS)) logging.info("INIT:: ser_PIC:" + str(ser_PIC)) init_controler(ser_VDS) init_controler(ser_VGS) init_controler_pic(ser_PIC) logging.info("INIT - END") return ser_VDS, ser_VGS, ser_PIC def init_port(PortUSB): signal.signal(signal.SIGINT, signal_handler) ser = serial.Serial( port=PortUSB, baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1, xonxoff=0, rtscts=1) if ser.isOpen(): try: fcntl.flock(ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: print ":: K2400 :: Port temporarily unavailable ..... exit" sys.exit() else: print ":: K2400 :: Some problems with Port ...... 
exit" sys.exit() ser.flushOutput() ser.flushInput() logging.info("INIT_PORT") return ser def init_controler(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, ':ABORt') port_write(ser, "*RST") port_write(ser, "TRIG:CLE") # Clear any pending input triggers port_write(ser, ":SYSTem:TIME:RESet:AUTO 0") # zerujemy zegar port_write(ser, ":SYSTem:LFRequency:AUTO ON") # ustawiamy automatyczny dobor czestotliwosci port_write(ser, ":SYSTem:BEEPer 300, 0.4") port_write(ser, ":FORM:ELEM CURR,TIME") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: promlem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER") return 0 def init_controler_pic(ser): if ser.isOpen(): try: ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer port_write(ser, "*CLS") port_write(ser, "*RST") port_write(ser, "FORM:ELEM READ,TIME") port_write(ser, ":RANGe:AUTO:ULIMit") port_write(ser, "CURR:NPLC " + str(PICNPLC / 2)) port_write(ser, ":MEASure?") # port_write(ser,":DISPlay:ENABle OFF") except Exception, e1: print ":: ERROR: problem with communicating ...: " + str(e1) else: print ":: ERROR: Can not open serial port: ", ser logging.info("INIT_CONTROLER_PIC") return 0 def port_close(ser): print ":: Ports close...." 
for s in ser: logging.info(s) s.close() logging.info("PORT: CLOSE") def format_e(n): a = '%E' % n return a.split('E')[0].rstrip('0').rstrip('.') + 'E' + a.split('E')[1] def switch(): with open(FILE_SWITCH, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "OFF": logging.info("USE SWITCH: OFF") with open(FILE_SWITCH, 'w+') as the_file: the_file.write("ON") return "OFF" else: return "ON" def start(): with open(FILE_START, 'r') as f: line = str(f.readline()) line = line.rstrip() if line == "START": logging.info("START measurements freom start funcztion") with open(FILE_START, 'w+') as the_file: the_file.write("STOP") return 1 else: return 0 def change(): VGS = -888 VDS = -888 if os.path.isfile(FILE_CHANGE) and os.path.getsize(FILE_CHANGE) > 0: with open(FILE_CHANGE, 'r') as f: lines = len(f.readlines()) f.seek(0) if lines == 2: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) if line[1].split("=")[0] == "VGS": VGS = float(line[1].split("=")[1]) elif line[1].split("=")[0] == "VDS": VDS = float(line[1].split("=")[1]) else: return False, 0, 0 if lines == 1: line = f.readlines() if line[0].split("=")[0] == "VGS": VGS = float(line[0].split("=")[1]) elif line[0].split("=")[0] == "VDS": VDS = float(line[0].split("=")[1]) else: return False, 0, 0 with open(FILE_CHANGE, "w"): pass txt = "#------ comments: Change VGS or VDS: " + str(line) log_save(txt) # print txt return "True", VGS, VDS else: return "False", VGS, VDS def file_header(ser_VGS, ser_VDS): logging.info("CREATE FILE HEADER") return 0 def file_footer(ser_VGS, ser_VDS): logging.info("CREATE FILE FOOTER") global File off(ser_VGS) off(ser_VDS) with open(FILE_SSHT, 'w+') as the_file: the_file.write("OFF") sht() log_save("#\n#----------------------- FOOTER ------------------------------------") log_save("# DEV_1 VDS: " + " " + port_wr(ser_VDS, '*IDN?')) log_save("# DEV_1 VDS: " + 
str(ser_VDS)) log_save("# DEV_2 VGS: " + " " + port_wr(ser_VGS, '*IDN?')) log_save("# DEV_1 VDS: " + str(ser_VGS)) log_save("#") log_save(port_error(ser_VDS, "VDS")) log_save(port_error(ser_VGS, "VGS")) log_save("#") log_save("# ----* zajaczkowksi@mpip-mainz.mpg.de *-------------------------------") log_save("# ----- End of file " + str(datetime.now()) + " created by:" + str(__file__)) global FF print "=====================================" print FF print "=====================================" splt() return 0 def splt(): global PLOT if PLOT: try: plt.save_png(FF+".png") except ValueError: print "Porblem z zapisem obrazka :( " pass def off(ser): logging.info("SENS OFF [:OUTP OFF]") port_write(ser, ":OUTP OFF") def port_error(ser, cmd): logging.info("READ ERRORS") err = [":STAT:MEAS? ", ":SYST:ERR:ALL?", "*ESR? ", "*OPC? ", ":STAT:OPER? ", ":STAT:MEAS? ", ":STAT:QUES? "] out = "# ======= ERROR'S @ " + str(cmd) + "\n#" for x in err: out += "# " + str(x) + " \t " + str(port_wr(ser, x)) + " \n#" # print out return out # ============================= MAIN if __name__ == '__main__': args = K2400_help.help() log_reset() print args.test # ------- podstawowe parametry: if args.filename != "tr00": FILE_NAME = args.filename if args.shutter: print "SHUTTER WILL BE OPEN" with open(FILE_SSHT, 'w+') as the_file: the_file.write("ON " + str(args.shutter)) if args.out: # ------- OUTPUT: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS_start = float(args.out[0][0]) VDS_stop = float(args.out[0][1]) VDS_step = float(args.out[0][2]) VDS_comp = float(args.DCOMP) VGS_start = float(args.out[0][3]) VGS_stop = float(args.out[0][4]) VGS_step = float(args.out[0][5]) VGS_comp = float(args.GCOMP) NPLC = float(args.NPLC) PICNPLC = NPLC DEL = float(args.DEL) SWEEP = True PLOT = args.fig ser = init() output_param = [FName, VDS_start, VDS_stop, VDS_step, VDS_comp, VGS_start, VGS_stop, VGS_step, VGS_comp, NPLC, DEL, SWEEP] K2400_output.output_steps(ser, *output_param) port_close(ser) if 
args.timee: # ------- TIME: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = float(args.timee[0][0]) VGS = float(args.timee[0][1]) VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL sht = args.shutter ww = args.waitstart PLOT = args.fig ser = init() param_timesteps = [FName, VDS, VDS_comp, VGS, VGS_comp, NPLC, DEL, sht, ww] K2400_time.time_steps(ser, *param_timesteps) port_close(ser) if args.trans: # ------- TRANSFER: parametry pomiaru FName = PATH_DATA + FILE_NAME VDS = args.trans[0][0] VGS_start = args.trans[0][1] VGS_stop = args.trans[0][2] VGS_step = args.trans[0][3] VDS_comp = args.DCOMP VGS_comp = args.GCOMP NPLC = args.NPLC PICNPLC = NPLC DEL = args.DEL SWEEP = args.sweep ttime = args.wait sht = args.shutter PLOT = args.fig param_transfer = [FName, VDS, VGS_start, VGS_stop, VGS_step, VDS_comp, VGS_comp, NPLC, DEL, SWEEP, ttime, sht] ser = init() K2400_transfer.transfer_steps(ser, *param_transfer) port_close(ser) sys.exit() if args.test: data=PATH_DATA+"test.txt" print PLOT log_init(data) PLOT = args.fig t0=time.time() x=-20 while True: x+=0.5 y=2*x*x-2*x+1 txt=str(x)+" "+str(y) log_save(txt) t1=time.time() print t1-t0 if x>=20: break splt()
logging.info("CHECK FILE: " + path) path = os.path.expanduser(path) root, ext = os.path.splitext(os.path.expanduser(path)) dir = os.path.dirname(root) fname = os.path.basename(root) candidate = fname + ext index = 1 ls = set(os.listdir(dir)) while candidate in ls: candidate = "{0}_{1:02d}{2}".format(fname, index, ext) index += 1 ffname = os.path.join(dir, candidate) logging.info("CHECK FILE: " + path) logging.info("CHECK FILE will save:" + ffname) return ffname
identifier_body
maneuver_spreadsheet_support.py
import numpy as np import pymysql from maneuvermodel import maneuveringfish, optimize from scipy.interpolate import RectBivariateSpline # had to sys.path.append("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/") when it stopped finding the module #----------------------------------------------------------------------------------------------------------------------- # BASIC SUMMARY #----------------------------------------------------------------------------------------------------------------------- # This file is designed to explore the relevant boundaries for the interpolation tables, erring on the side of making # their coverage comfortably more expansive than needed. It includes justifications for the dimensions of the tables as # well as which tables are available, and it places job descriptions for the table calculator in a database. # Data will eventually be organized by subfolders by fork length and current speed, with files for response variable # (activity cost and pursuit duration) within each. Within those files, rows represent the x direction (first column # being x coordinate labels) and columns the y direction (first row being y labels). Tables are provided for the # positive half of the x-y planes; maneuvers elsewhere ar found by rotating into that plane. # We save activity cost and pursuit duration, which are the relevant numbers to plug into another foraging model, # but this must be done carefully to make sure SMR is also accounted for when calculating net rate of energy intake. # The most relevant "handling time" for a model is pursuit duration, under the assumption that wait time and # the return stage don't count as "handling" because the fish can be detecting and potentially pursuing other prey # during that time. 
Only the pursuit is time lost with regard to other #----------------------------------------------------------------------------------------------------------------------- # MODELING CHOICES, INCLUDING FIXED SETTINGS #----------------------------------------------------------------------------------------------------------------------- # It isn't feasible to precalculate values for every conceivable combination of settings, so we instead choose the most # likely applications and vary only the quantities mentioned in the Basic Summary. # # Wait times are enabled to give accurate energy costs for items detected far upstream. Our paper suggested that # including wait times resulted in a worse fit to real data for the model, but that was in a test assuming the fish # detected the item when it responded to it. We could not know when they actually first detected items or how long they # really waited in such cases. However, from a theoretical standpoint, excluding wait time would result in excessively # high predicted time/energy costs for items detected far upstream, which is probably not the case. To avoid penalizing # more effective prey detection, we have to include wait time. Users wishing to exclude it can use this script to make # their own, net set of tables. # # We use only energy cost and not total cost (including opportunity cost based on NREI), because it would require far # more calculations to account for different levels of possible NREI, and we found that the model excluding opportunity # costs was the best fit our maneuver data from real fish anyway. 
#--------------------------------------------------------------------------------------------------------------------------------------------------------- # BOUNDARY CALCULATIONS FOR THE TABLES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CURRENT SPEED speeds_in_bodylengths = [attempt.mean_current_speed / attempt.fish.best_length for attempt in all_foraging_attempts] max(speeds_in_bodylengths) # 6.83 np.percentile(speeds_in_bodylengths, 99) # 5.48 np.percentile(speeds_in_bodylengths, 95) # 4.04 np.percentile(speeds_in_bodylengths, 50) # 1.46 # Maneuvers at high current speeds in bodylengths/s were not uncommon. # It wouldn't be too abnormal to have a 4 cm Chinook in 20 cm/s water (5 bodylengths) # But it would be very abnormal to have a 50 cm Grayling in 150 cm/s water (3 bodylengths) # # For distance: # The absolute longest was 9.2 bodylengths, next longest 8.8, then 8.1, then a couple dozen. # in the 7-6-5 range. The 95th percentile is 2.9 bodylengths and 99th percentile is 4.9 bodylengths. # We can probably assume some of the really long detections were extreme anomalies or misinterpretations of joint maneuvers. # # # I'm putting the database query inside the second but not third level of the nested list, to strike the balance between # query size and query count # Size appropriate velocity max is designed to exceed realistic foraging for fish of a given size and then some, while # still eliminating the work of calculating absurdly high velocities. 
max_velocity = 3.0 * fl + 50 # in cm/s and cm fork_lengths = list(np.concatenate([np.arange(3,5,0.2),np.arange(5,10,0.5),np.arange(10,20,1),np.arange(20,57,2),np.arange(58,80,3)])) time_per_number = 6.0 # seconds required to compute one cell in the spreadsheet bytes_per_number = 9.8 # bytes required to store one cell in the spreadsheet numbers_per_sheet = 999 # number of cells in each sheet, based on the resolution max_instances = 25 # max number of virtual machines running calculations queries = [] total_sheets = 0 total_bytes = 0 total_time = 0 all_velocities = list(np.concatenate([np.arange(1,19,2), np.arange(19, 40, 3), np.arange(40, 90, 5), np.arange(90,166,15)])) for fl in fork_lengths: size_appropriate_velocities = [v for v in all_velocities if v < 3.0 * fl + 50] for v in size_appropriate_velocities: total_sheets += 1 total_bytes += bytes_per_number * numbers_per_sheet total_time += time_per_number * numbers_per_sheet queries.append(f"INSERT INTO maneuver_model_tasks (fork_length, velocity) VALUES ({fl:.1f}, {v:.1f})") total_bytes *= 2 # because there are 2 response variables time_per_sheet = total_time / total_sheets real_time = (total_time / 3600) / max_instances print("Total calculation predicted to generate {0} sheets in {1:.1f} cpu-hours ({2:.1f} min/sheet, {4:.1f} hours for {5} instances) taking {3:.1f} mb of space.".format(total_sheets, total_time/3600.0, time_per_sheet/60.0, total_bytes/(1024.0*1024.0), real_time, max_instances)) # Actually generate the to-do list in the database -- ONLY DO THIS ONCE unless I am resetting the whole thing! 
# If I do reset the whole thing, I need to do ALTER TABLE maneuver_model_tasks AUTO_INCREMENT = 1 db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) cursor = db.cursor() for i in range(len(queries)): print("Running query {0} of {1}.".format(i,len(queries))) exq = cursor.execute(queries[i]) db.close() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING ACCURACY WITH WHICH SPLINE PREDICTS DIRECT MODEL PREDICTIONS and optimizing spline parameters #--------------------------------------------------------------------------------------------------------------------------------------------------------- # This code refers to all kinds of global variables from other sheets and will be a nuisance to reuse. It's not really meant for that. Just look at the # commented results below instead. import random # get xs, ys, ec from maneuver_spreadsheet_creation.py spl_ec_665 = RectBivariateSpline(xs, ys, ec) spl_ec_deg5 = RectBivariateSpline(xs, ys, ec, kx=5, ky=5) # kx, ky = spline degree... both 1 and 5 worked worse spl_efc_smooth = RectBivariateSpline(xs, ys, ec, s=2) # s = smoothing... 
looked good on plots but bad for results # worked very poorly with x=-12, y=5 errors_A = [] errors_B = [] for i in range(1,300): # Uniform test throughout the possible reaction distances or an inner subset thereof #distfact=1 #testx = random.uniform(min(xs)/distfact,max(xs)/distfact) #testy = random.uniform(min(ys)/distfact,max(ys)/distfact) # Test weighted to actual reaction distances (in bodylengths) by choosing randomly from real fish data from all_foraging_attempts in Maneuver Paper Calculations.py attempt = random.choice(all_foraging_attempts) testx = fork_length * attempt.reaction_vector[0] / attempt.fish.best_length testy = fork_length * attempt.lateral_reaction_distance / attempt.fish.best_length test_energy_model = optimize.optimal_maneuver(fish, detection_point_3D = (testx, testy, 0.0), popsize=4, variant_scale=1.5, mixing_ratio=3.0, iterations=4500, use_starting_iterations=True, num_starting_populations=12, num_starting_iterations=500).dynamics.activity_cost errors_A.append(100 * abs(spl_ec_665.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in log spline model errors_B.append(100 * abs(spl_ec_416.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in lin spline model print("Mean A (665) error is {0:.2f}, median {1:.2f}, 95th percentile {2:.2f}, max {3:.2f}. Mean B (416) error is {4:.2f}, median {5:.2f}, 95th percentile is {6:.2f}, max is {7:.2f}.".format(np.mean(errors_A), np.median(errors_A), percentile(errors_A,95), max(errors_A), np.mean(errors_B), np.median(errors_B), percentile(errors_B,95), max(errors_B))) # Within the region closest to the fish (distfact=5, inner 20 % of interpolated range), logspline works better: # Mean logspline error is 0.00897633464221, median 0.0044701166147, max 0.0864545485659. Mean linspline error is 0.0399583067863, median 0.0247217810159, max 0.125442996728. 
# For the inner 50% of extrapolated range (distfact=2): # Mean logspline error is 0.0484298747137, median 0.0206530488527, max 0.355140338201. Mean linspline error is 0.0270808114457, median 0.0120931308594, max 0.198272165424. # For the overall extrapolated region: # Mean logspline error is 0.128979262139, median 0.0599612297048, max 0.692983284836. Mean linspline error is 0.0114831437642, median 0.00342595840382, max 0.103078836909. # AFTER changing spline degree to 1 (linear) # distfact = 1 # Mean logspline error is 0.0218975898111, median 0.0113214302154, max 0.138234911992. Mean linspline error is 0.0125631421346, median 0.0035485055496, max 0.140522637627 # distfact = 2 # Mean logspline error is 0.0368812920421, median 0.0152525418311, max 0.198044035521. Mean linspline error is 0.0243640316147, median 0.0150866681404, max 0.147206404992. # distfact = 5 # Mean logspline error is 0.0137129814445, median 0.00836141861284, max 0.105787513153. Mean linspline error is 0.0393639917989, median 0.0210805421877, max 0.176547064472. # NEXT TEST: Increase resolution to 312 pts, and start using percent errors instead of regular errors, cover full realistic ranges # distfact = 1 # Mean logspline error is 6.61230781457, median 2.52211224078, max 81.438706048. Mean linspline error is 2.70886911207, median 0.140402890291, max 44.1715921604. # distfact = 2 # Mean logspline error is 5.13227646421, median 1.11771126799, max 64.1222831046. Mean linspline error is 15.9565636692, median 1.12503901849, max 632.523260832. # distfact = 5 # Mean logspline error is 5.95691325441, median 3.00161632168, max 82.3370962708. Mean linspline error is 8.76483038921, median 3.49808649369, max 84.3056318184. # Now using data from actual fish within the range where they're really doing stuff (N=300 for calculating these stats) # Mean logspline error is 6.22782654855, median 3.72433730964, max 42.6818144461. 
Mean linspline error is 15.4806385555, median 4.82063948279, max 323.389534253 # That is, dare I say, tolerable... but let's see if increasing iterations helps. # Now testing two models with either 312-point or 1248-point grids (both on log scale) used to create the spline # Mean 1248-pt error is 5.32348294783, median 1.49117256539, max 47.2459070847. Mean 312-pt error is 6.28938249563, median 3.69462175484, max 38.4551959213. # Not that much of an improvement. What if instead of increasing resolution so much, we increase iterations to smooth things out? # Using 12x500+4500 (both for extrapolation and test) # Switching from 6x500 to 12x500 for the preliminary solution really didn't help much. # Now trying out smoothing on the interpolation in model 312bs (smoothed at s=2 in RectBivariateSpline) # Mean 312b error is 5.9512430459, median 4.12127444535, max 66.6321313342. Mean 312bs error is 36.5312610831, median 22.2841087968, max 395.585373521. # Okay, so the smoothing looks good on the plots, but it's actually awful. # Next task is to see how much difference spline order makes, model 312bl, with linear instead of cubic spline interpolation: # Mean 312b error is 5.41455284206, median 3.74412497518, max 40.8658187381. Mean 312bl error is 35.3688630676, median 23.6634535005, max 488.033873058. # Okay, so the cubic spline is VASTLY better than linear interpolation. # Next test is with better interpolation grid that isn't so heavily weighted toward zero (starting log scale at 1.0 instead of 0.1, but adding in some short ones) # Mean 416 error is 6.49, median 3.00, 95th percentile 22.91, max 156.98. Mean 312 error is 6.54, median 4.08, max 20.75, 95th percentile 72.64. # Also can't hurt to give quintic splines a try... okay, they're no good. # Mean A (416) error is 5.12, median 2.07, 95th percentile 20.01, max 118.43. Mean B (416q) error is 18.64, median 7.24, max 50.52, 95th percentile 1172.86. 
# Next result comparing 665 (beefing up resolution on the -x and +y quadrant) to the 416 result (which had fairly useless, beefed-up resolution in the +x +y quadrant) # Mean A (665) error is 4.76, median 1.84, 95th percentile 16.38, max 96.79. Mean B (416) error is 5.23, median 2.11, 95th percentile is 18.74, max is 67.20. # Calling that one good enough. import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') fig, (ax1, ax2, ax3) = plt.subplots(1, 3) xnew, ynew = np.ogrid[xmin:xmax:1000j, ymin:ymax:1000j] #meshgrid(xx, yy) znew_a = spl_ec_312(xnew, ynew) znew_b = spl_ec_416(xnew, ynew) znew_c = spl_ec_1248(xnew, ynew) im1 = ax1.imshow(znew_a, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im2 = ax2.imshow(znew_b, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im3 = ax3.imshow(znew_c, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) ax1.set_title('312') ax2.set_title('416') ax3.set_title('1248') plt.show() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING EFFECT OF POINTS USED TO CONSTRUCT SPLINES ON SPLINE CALL SPEED (NO EFFECT) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # The following test, conducted using splines based on 25, 100, or 900 points, shows that the number of points used to construct the spline has # no detectable effect on very short amount of time required to get call the spline function (about 3.5 microseconds). This means the only limiting # factor in the resolution of grid we're using to create the splines is how much computing time & storage space we have to calculate/hold the grids. # Note that the real optimal energy cost directly calculated from the model with tons of iterations is about 0.6127 J. 
#from timeit import timeit #timeit("spl_ec25(-2.55, 16.38)", setup="from __main__ import spl_ec25", number = 100000)/100000 #timeit("spl_ec100(-2.55, 16.38)", setup="from __main__ import spl_ec100", number = 100000)/100000 #timeit("spl_ec900(-2.55, 16.38)", setup="from __main__ import spl_ec900", number = 100000)/100000 #--------------------------------------------------------------------------------------------------------------------------------------------------------- # FINAL INTERPOLATION CODE (FROM CSV FILE) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Note: Using non-default options (for spline degree or smoothing) does not improve interpolation quality, as shown above. def maneuver_cost_interpolation(file_to_interpolate):
def interpolate_maneuver_cost(detection_point_3D, interpolation_function): x, y, z = detection_point_3D R = np.sqrt(y*y + z*z) matrix_2Dfrom3D = np.array([[1,0,0],[0,y/R,z/R],[0,-z/R,y/R]]) # matrix to rotate the 3-D detection point about the x-axis into the x-y plane (xrot, yrot) = matrix_2Dfrom3D.dot(np.array(detection_point_3D))[0:2] # 2-D detection point to use for the model, not yet sign-adjusted yrot = abs(yrot) # flip negative-y values to the positive half of the x-y plane for cost calculations return interpolation_function(xrot, yrot)[0,0] testpt = (-15, 5, 3) interp_ec = maneuver_cost_interpolation("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel/sample_data/interpolation_sample_data_energy_cost.csv") interpolate_maneuver_cost(testpt, interp_ec) from timeit import timeit timeit("interpolate_maneuver_cost(testpt, interp_ec)", setup="from __main__ import interpolate_maneuver_cost, testpt, interp_ec", number = 1000)/1000 # Could use Dill to serialize the interpolations, too # https://stackoverflow.com/questions/23997431/is-there-a-way-to-pickle-a-scipy-interpolate-rbf-object #--------------------------------------------------------------------------------------------------------------------------------------------------------- # TRANSFER THIS CODE TO AMAZON INSTANCES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Had to install ssh-askpass for this to work: https://github.com/theseal/ssh-askpass # Make sure to use the .pem version of the keyfile straight from Amazon, not the Putty .ppk version # Also have to chmod 400 the .pem file before it can be used. 
#import os #ec2_private_key = "'/Users/Jason/Dropbox/Amazon AWS/NeuswangerManeuverModelS3Instances.pem'" #module_folder = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel'" #creation_script = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuver_spreadsheet_creation.py'" #ec2_server_address = "ec2-34-216-120-173.us-west-2.compute.amazonaws.com" # NEED TO FREQUENTLY UPDATE #remote_folder = "~" #os.system("scp -r -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, module_folder, ec2_server_address, remote_folder)) #command2 = "scp -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, creation_script, ec2_server_address, remote_folder) ##print(command2) # WHEN THE SERVER IP CHANGES, NEED TO RE-RUN PRINTED SCP COMMAND IN TERMINAL TO GET AROUND ssh-askpass ERROR #os.system(command2) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CLEAN UP INTERRUPTED PROCESSES #--------------------------------------------------------------------------------------------------------------------------------------------------------- db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) #db = pymysql.connect(host="maneuver-model-tasks.crtfph6ctn2x.us-west-2.rds.amazonaws.com", port=3306, user="manmoduser", passwd="x]%o4g28", db="maneuver_model_tasks", autocommit=True) cursor = db.cursor() cursor.execute("UPDATE maneuver_model_tasks SET start_time = NULL WHERE end_time IS NULL") db.close()
filedata = np.genfromtxt(file_to_interpolate, delimiter=',') xs = filedata[1:,0] # In sample data, x should go from -24 to 15 ys = filedata[0,1:-1] # In sample data, y should go from .001 to 18 data = filedata[1:,1:-1] # We have to trim off the last element of each row (nan) because of the trailing comma when saving return RectBivariateSpline(xs, ys, data)
identifier_body
maneuver_spreadsheet_support.py
import numpy as np import pymysql from maneuvermodel import maneuveringfish, optimize from scipy.interpolate import RectBivariateSpline # had to sys.path.append("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/") when it stopped finding the module #----------------------------------------------------------------------------------------------------------------------- # BASIC SUMMARY #----------------------------------------------------------------------------------------------------------------------- # This file is designed to explore the relevant boundaries for the interpolation tables, erring on the side of making # their coverage comfortably more expansive than needed. It includes justifications for the dimensions of the tables as # well as which tables are available, and it places job descriptions for the table calculator in a database. # Data will eventually be organized by subfolders by fork length and current speed, with files for response variable # (activity cost and pursuit duration) within each. Within those files, rows represent the x direction (first column # being x coordinate labels) and columns the y direction (first row being y labels). Tables are provided for the # positive half of the x-y planes; maneuvers elsewhere ar found by rotating into that plane. # We save activity cost and pursuit duration, which are the relevant numbers to plug into another foraging model, # but this must be done carefully to make sure SMR is also accounted for when calculating net rate of energy intake. # The most relevant "handling time" for a model is pursuit duration, under the assumption that wait time and # the return stage don't count as "handling" because the fish can be detecting and potentially pursuing other prey # during that time. 
Only the pursuit is time lost with regard to other #----------------------------------------------------------------------------------------------------------------------- # MODELING CHOICES, INCLUDING FIXED SETTINGS #----------------------------------------------------------------------------------------------------------------------- # It isn't feasible to precalculate values for every conceivable combination of settings, so we instead choose the most # likely applications and vary only the quantities mentioned in the Basic Summary. # # Wait times are enabled to give accurate energy costs for items detected far upstream. Our paper suggested that # including wait times resulted in a worse fit to real data for the model, but that was in a test assuming the fish # detected the item when it responded to it. We could not know when they actually first detected items or how long they # really waited in such cases. However, from a theoretical standpoint, excluding wait time would result in excessively # high predicted time/energy costs for items detected far upstream, which is probably not the case. To avoid penalizing # more effective prey detection, we have to include wait time. Users wishing to exclude it can use this script to make # their own, net set of tables. # # We use only energy cost and not total cost (including opportunity cost based on NREI), because it would require far # more calculations to account for different levels of possible NREI, and we found that the model excluding opportunity # costs was the best fit our maneuver data from real fish anyway. 
#--------------------------------------------------------------------------------------------------------------------------------------------------------- # BOUNDARY CALCULATIONS FOR THE TABLES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CURRENT SPEED speeds_in_bodylengths = [attempt.mean_current_speed / attempt.fish.best_length for attempt in all_foraging_attempts] max(speeds_in_bodylengths) # 6.83 np.percentile(speeds_in_bodylengths, 99) # 5.48 np.percentile(speeds_in_bodylengths, 95) # 4.04 np.percentile(speeds_in_bodylengths, 50) # 1.46 # Maneuvers at high current speeds in bodylengths/s were not uncommon. # It wouldn't be too abnormal to have a 4 cm Chinook in 20 cm/s water (5 bodylengths) # But it would be very abnormal to have a 50 cm Grayling in 150 cm/s water (3 bodylengths) # # For distance: # The absolute longest was 9.2 bodylengths, next longest 8.8, then 8.1, then a couple dozen. # in the 7-6-5 range. The 95th percentile is 2.9 bodylengths and 99th percentile is 4.9 bodylengths. # We can probably assume some of the really long detections were extreme anomalies or misinterpretations of joint maneuvers. # # # I'm putting the database query inside the second but not third level of the nested list, to strike the balance between # query size and query count # Size appropriate velocity max is designed to exceed realistic foraging for fish of a given size and then some, while # still eliminating the work of calculating absurdly high velocities. 
max_velocity = 3.0 * fl + 50 # in cm/s and cm fork_lengths = list(np.concatenate([np.arange(3,5,0.2),np.arange(5,10,0.5),np.arange(10,20,1),np.arange(20,57,2),np.arange(58,80,3)])) time_per_number = 6.0 # seconds required to compute one cell in the spreadsheet bytes_per_number = 9.8 # bytes required to store one cell in the spreadsheet numbers_per_sheet = 999 # number of cells in each sheet, based on the resolution max_instances = 25 # max number of virtual machines running calculations queries = [] total_sheets = 0 total_bytes = 0 total_time = 0 all_velocities = list(np.concatenate([np.arange(1,19,2), np.arange(19, 40, 3), np.arange(40, 90, 5), np.arange(90,166,15)])) for fl in fork_lengths: size_appropriate_velocities = [v for v in all_velocities if v < 3.0 * fl + 50] for v in size_appropriate_velocities:
total_bytes *= 2 # because there are 2 response variables time_per_sheet = total_time / total_sheets real_time = (total_time / 3600) / max_instances print("Total calculation predicted to generate {0} sheets in {1:.1f} cpu-hours ({2:.1f} min/sheet, {4:.1f} hours for {5} instances) taking {3:.1f} mb of space.".format(total_sheets, total_time/3600.0, time_per_sheet/60.0, total_bytes/(1024.0*1024.0), real_time, max_instances)) # Actually generate the to-do list in the database -- ONLY DO THIS ONCE unless I am resetting the whole thing! # If I do reset the whole thing, I need to do ALTER TABLE maneuver_model_tasks AUTO_INCREMENT = 1 db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) cursor = db.cursor() for i in range(len(queries)): print("Running query {0} of {1}.".format(i,len(queries))) exq = cursor.execute(queries[i]) db.close() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING ACCURACY WITH WHICH SPLINE PREDICTS DIRECT MODEL PREDICTIONS and optimizing spline parameters #--------------------------------------------------------------------------------------------------------------------------------------------------------- # This code refers to all kinds of global variables from other sheets and will be a nuisance to reuse. It's not really meant for that. Just look at the # commented results below instead. import random # get xs, ys, ec from maneuver_spreadsheet_creation.py spl_ec_665 = RectBivariateSpline(xs, ys, ec) spl_ec_deg5 = RectBivariateSpline(xs, ys, ec, kx=5, ky=5) # kx, ky = spline degree... both 1 and 5 worked worse spl_efc_smooth = RectBivariateSpline(xs, ys, ec, s=2) # s = smoothing... 
looked good on plots but bad for results # worked very poorly with x=-12, y=5 errors_A = [] errors_B = [] for i in range(1,300): # Uniform test throughout the possible reaction distances or an inner subset thereof #distfact=1 #testx = random.uniform(min(xs)/distfact,max(xs)/distfact) #testy = random.uniform(min(ys)/distfact,max(ys)/distfact) # Test weighted to actual reaction distances (in bodylengths) by choosing randomly from real fish data from all_foraging_attempts in Maneuver Paper Calculations.py attempt = random.choice(all_foraging_attempts) testx = fork_length * attempt.reaction_vector[0] / attempt.fish.best_length testy = fork_length * attempt.lateral_reaction_distance / attempt.fish.best_length test_energy_model = optimize.optimal_maneuver(fish, detection_point_3D = (testx, testy, 0.0), popsize=4, variant_scale=1.5, mixing_ratio=3.0, iterations=4500, use_starting_iterations=True, num_starting_populations=12, num_starting_iterations=500).dynamics.activity_cost errors_A.append(100 * abs(spl_ec_665.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in log spline model errors_B.append(100 * abs(spl_ec_416.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in lin spline model print("Mean A (665) error is {0:.2f}, median {1:.2f}, 95th percentile {2:.2f}, max {3:.2f}. Mean B (416) error is {4:.2f}, median {5:.2f}, 95th percentile is {6:.2f}, max is {7:.2f}.".format(np.mean(errors_A), np.median(errors_A), percentile(errors_A,95), max(errors_A), np.mean(errors_B), np.median(errors_B), percentile(errors_B,95), max(errors_B))) # Within the region closest to the fish (distfact=5, inner 20 % of interpolated range), logspline works better: # Mean logspline error is 0.00897633464221, median 0.0044701166147, max 0.0864545485659. Mean linspline error is 0.0399583067863, median 0.0247217810159, max 0.125442996728. 
# For the inner 50% of extrapolated range (distfact=2): # Mean logspline error is 0.0484298747137, median 0.0206530488527, max 0.355140338201. Mean linspline error is 0.0270808114457, median 0.0120931308594, max 0.198272165424. # For the overall extrapolated region: # Mean logspline error is 0.128979262139, median 0.0599612297048, max 0.692983284836. Mean linspline error is 0.0114831437642, median 0.00342595840382, max 0.103078836909. # AFTER changing spline degree to 1 (linear) # distfact = 1 # Mean logspline error is 0.0218975898111, median 0.0113214302154, max 0.138234911992. Mean linspline error is 0.0125631421346, median 0.0035485055496, max 0.140522637627 # distfact = 2 # Mean logspline error is 0.0368812920421, median 0.0152525418311, max 0.198044035521. Mean linspline error is 0.0243640316147, median 0.0150866681404, max 0.147206404992. # distfact = 5 # Mean logspline error is 0.0137129814445, median 0.00836141861284, max 0.105787513153. Mean linspline error is 0.0393639917989, median 0.0210805421877, max 0.176547064472. # NEXT TEST: Increase resolution to 312 pts, and start using percent errors instead of regular errors, cover full realistic ranges # distfact = 1 # Mean logspline error is 6.61230781457, median 2.52211224078, max 81.438706048. Mean linspline error is 2.70886911207, median 0.140402890291, max 44.1715921604. # distfact = 2 # Mean logspline error is 5.13227646421, median 1.11771126799, max 64.1222831046. Mean linspline error is 15.9565636692, median 1.12503901849, max 632.523260832. # distfact = 5 # Mean logspline error is 5.95691325441, median 3.00161632168, max 82.3370962708. Mean linspline error is 8.76483038921, median 3.49808649369, max 84.3056318184. # Now using data from actual fish within the range where they're really doing stuff (N=300 for calculating these stats) # Mean logspline error is 6.22782654855, median 3.72433730964, max 42.6818144461. 
Mean linspline error is 15.4806385555, median 4.82063948279, max 323.389534253 # That is, dare I say, tolerable... but let's see if increasing iterations helps. # Now testing two models with either 312-point or 1248-point grids (both on log scale) used to create the spline # Mean 1248-pt error is 5.32348294783, median 1.49117256539, max 47.2459070847. Mean 312-pt error is 6.28938249563, median 3.69462175484, max 38.4551959213. # Not that much of an improvement. What if instead of increasing resolution so much, we increase iterations to smooth things out? # Using 12x500+4500 (both for extrapolation and test) # Switching from 6x500 to 12x500 for the preliminary solution really didn't help much. # Now trying out smoothing on the interpolation in model 312bs (smoothed at s=2 in RectBivariateSpline) # Mean 312b error is 5.9512430459, median 4.12127444535, max 66.6321313342. Mean 312bs error is 36.5312610831, median 22.2841087968, max 395.585373521. # Okay, so the smoothing looks good on the plots, but it's actually awful. # Next task is to see how much difference spline order makes, model 312bl, with linear instead of cubic spline interpolation: # Mean 312b error is 5.41455284206, median 3.74412497518, max 40.8658187381. Mean 312bl error is 35.3688630676, median 23.6634535005, max 488.033873058. # Okay, so the cubic spline is VASTLY better than linear interpolation. # Next test is with better interpolation grid that isn't so heavily weighted toward zero (starting log scale at 1.0 instead of 0.1, but adding in some short ones) # Mean 416 error is 6.49, median 3.00, 95th percentile 22.91, max 156.98. Mean 312 error is 6.54, median 4.08, max 20.75, 95th percentile 72.64. # Also can't hurt to give quintic splines a try... okay, they're no good. # Mean A (416) error is 5.12, median 2.07, 95th percentile 20.01, max 118.43. Mean B (416q) error is 18.64, median 7.24, max 50.52, 95th percentile 1172.86. 
# Next result comparing 665 (beefing up resolution on the -x and +y quadrant) to the 416 result (which had fairly useless, beefed-up resolution in the +x +y quadrant) # Mean A (665) error is 4.76, median 1.84, 95th percentile 16.38, max 96.79. Mean B (416) error is 5.23, median 2.11, 95th percentile is 18.74, max is 67.20. # Calling that one good enough. import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') fig, (ax1, ax2, ax3) = plt.subplots(1, 3) xnew, ynew = np.ogrid[xmin:xmax:1000j, ymin:ymax:1000j] #meshgrid(xx, yy) znew_a = spl_ec_312(xnew, ynew) znew_b = spl_ec_416(xnew, ynew) znew_c = spl_ec_1248(xnew, ynew) im1 = ax1.imshow(znew_a, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im2 = ax2.imshow(znew_b, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im3 = ax3.imshow(znew_c, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) ax1.set_title('312') ax2.set_title('416') ax3.set_title('1248') plt.show() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING EFFECT OF POINTS USED TO CONSTRUCT SPLINES ON SPLINE CALL SPEED (NO EFFECT) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # The following test, conducted using splines based on 25, 100, or 900 points, shows that the number of points used to construct the spline has # no detectable effect on very short amount of time required to get call the spline function (about 3.5 microseconds). This means the only limiting # factor in the resolution of grid we're using to create the splines is how much computing time & storage space we have to calculate/hold the grids. # Note that the real optimal energy cost directly calculated from the model with tons of iterations is about 0.6127 J. 
#from timeit import timeit #timeit("spl_ec25(-2.55, 16.38)", setup="from __main__ import spl_ec25", number = 100000)/100000 #timeit("spl_ec100(-2.55, 16.38)", setup="from __main__ import spl_ec100", number = 100000)/100000 #timeit("spl_ec900(-2.55, 16.38)", setup="from __main__ import spl_ec900", number = 100000)/100000 #--------------------------------------------------------------------------------------------------------------------------------------------------------- # FINAL INTERPOLATION CODE (FROM CSV FILE) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Note: Using non-default options (for spline degree or smoothing) does not improve interpolation quality, as shown above. def maneuver_cost_interpolation(file_to_interpolate): filedata = np.genfromtxt(file_to_interpolate, delimiter=',') xs = filedata[1:,0] # In sample data, x should go from -24 to 15 ys = filedata[0,1:-1] # In sample data, y should go from .001 to 18 data = filedata[1:,1:-1] # We have to trim off the last element of each row (nan) because of the trailing comma when saving return RectBivariateSpline(xs, ys, data) def interpolate_maneuver_cost(detection_point_3D, interpolation_function): x, y, z = detection_point_3D R = np.sqrt(y*y + z*z) matrix_2Dfrom3D = np.array([[1,0,0],[0,y/R,z/R],[0,-z/R,y/R]]) # matrix to rotate the 3-D detection point about the x-axis into the x-y plane (xrot, yrot) = matrix_2Dfrom3D.dot(np.array(detection_point_3D))[0:2] # 2-D detection point to use for the model, not yet sign-adjusted yrot = abs(yrot) # flip negative-y values to the positive half of the x-y plane for cost calculations return interpolation_function(xrot, yrot)[0,0] testpt = (-15, 5, 3) interp_ec = maneuver_cost_interpolation("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel/sample_data/interpolation_sample_data_energy_cost.csv") interpolate_maneuver_cost(testpt, 
interp_ec) from timeit import timeit timeit("interpolate_maneuver_cost(testpt, interp_ec)", setup="from __main__ import interpolate_maneuver_cost, testpt, interp_ec", number = 1000)/1000 # Could use Dill to serialize the interpolations, too # https://stackoverflow.com/questions/23997431/is-there-a-way-to-pickle-a-scipy-interpolate-rbf-object #--------------------------------------------------------------------------------------------------------------------------------------------------------- # TRANSFER THIS CODE TO AMAZON INSTANCES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Had to install ssh-askpass for this to work: https://github.com/theseal/ssh-askpass # Make sure to use the .pem version of the keyfile straight from Amazon, not the Putty .ppk version # Also have to chmod 400 the .pem file before it can be used. #import os #ec2_private_key = "'/Users/Jason/Dropbox/Amazon AWS/NeuswangerManeuverModelS3Instances.pem'" #module_folder = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel'" #creation_script = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuver_spreadsheet_creation.py'" #ec2_server_address = "ec2-34-216-120-173.us-west-2.compute.amazonaws.com" # NEED TO FREQUENTLY UPDATE #remote_folder = "~" #os.system("scp -r -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, module_folder, ec2_server_address, remote_folder)) #command2 = "scp -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, creation_script, ec2_server_address, remote_folder) ##print(command2) # WHEN THE SERVER IP CHANGES, NEED TO RE-RUN PRINTED SCP COMMAND IN TERMINAL TO GET AROUND ssh-askpass ERROR #os.system(command2) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CLEAN UP INTERRUPTED PROCESSES 
#--------------------------------------------------------------------------------------------------------------------------------------------------------- db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) #db = pymysql.connect(host="maneuver-model-tasks.crtfph6ctn2x.us-west-2.rds.amazonaws.com", port=3306, user="manmoduser", passwd="x]%o4g28", db="maneuver_model_tasks", autocommit=True) cursor = db.cursor() cursor.execute("UPDATE maneuver_model_tasks SET start_time = NULL WHERE end_time IS NULL") db.close()
total_sheets += 1 total_bytes += bytes_per_number * numbers_per_sheet total_time += time_per_number * numbers_per_sheet queries.append(f"INSERT INTO maneuver_model_tasks (fork_length, velocity) VALUES ({fl:.1f}, {v:.1f})")
conditional_block
maneuver_spreadsheet_support.py
import numpy as np import pymysql from maneuvermodel import maneuveringfish, optimize from scipy.interpolate import RectBivariateSpline # had to sys.path.append("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/") when it stopped finding the module #----------------------------------------------------------------------------------------------------------------------- # BASIC SUMMARY #----------------------------------------------------------------------------------------------------------------------- # This file is designed to explore the relevant boundaries for the interpolation tables, erring on the side of making # their coverage comfortably more expansive than needed. It includes justifications for the dimensions of the tables as # well as which tables are available, and it places job descriptions for the table calculator in a database. # Data will eventually be organized by subfolders by fork length and current speed, with files for response variable # (activity cost and pursuit duration) within each. Within those files, rows represent the x direction (first column # being x coordinate labels) and columns the y direction (first row being y labels). Tables are provided for the # positive half of the x-y planes; maneuvers elsewhere ar found by rotating into that plane. # We save activity cost and pursuit duration, which are the relevant numbers to plug into another foraging model, # but this must be done carefully to make sure SMR is also accounted for when calculating net rate of energy intake. # The most relevant "handling time" for a model is pursuit duration, under the assumption that wait time and # the return stage don't count as "handling" because the fish can be detecting and potentially pursuing other prey # during that time. 
Only the pursuit is time lost with regard to other #----------------------------------------------------------------------------------------------------------------------- # MODELING CHOICES, INCLUDING FIXED SETTINGS #----------------------------------------------------------------------------------------------------------------------- # It isn't feasible to precalculate values for every conceivable combination of settings, so we instead choose the most # likely applications and vary only the quantities mentioned in the Basic Summary. # # Wait times are enabled to give accurate energy costs for items detected far upstream. Our paper suggested that # including wait times resulted in a worse fit to real data for the model, but that was in a test assuming the fish # detected the item when it responded to it. We could not know when they actually first detected items or how long they # really waited in such cases. However, from a theoretical standpoint, excluding wait time would result in excessively # high predicted time/energy costs for items detected far upstream, which is probably not the case. To avoid penalizing # more effective prey detection, we have to include wait time. Users wishing to exclude it can use this script to make # their own, net set of tables. # # We use only energy cost and not total cost (including opportunity cost based on NREI), because it would require far # more calculations to account for different levels of possible NREI, and we found that the model excluding opportunity # costs was the best fit our maneuver data from real fish anyway. 
#--------------------------------------------------------------------------------------------------------------------------------------------------------- # BOUNDARY CALCULATIONS FOR THE TABLES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CURRENT SPEED speeds_in_bodylengths = [attempt.mean_current_speed / attempt.fish.best_length for attempt in all_foraging_attempts] max(speeds_in_bodylengths) # 6.83 np.percentile(speeds_in_bodylengths, 99) # 5.48 np.percentile(speeds_in_bodylengths, 95) # 4.04 np.percentile(speeds_in_bodylengths, 50) # 1.46 # Maneuvers at high current speeds in bodylengths/s were not uncommon. # It wouldn't be too abnormal to have a 4 cm Chinook in 20 cm/s water (5 bodylengths) # But it would be very abnormal to have a 50 cm Grayling in 150 cm/s water (3 bodylengths) # # For distance: # The absolute longest was 9.2 bodylengths, next longest 8.8, then 8.1, then a couple dozen. # in the 7-6-5 range. The 95th percentile is 2.9 bodylengths and 99th percentile is 4.9 bodylengths. # We can probably assume some of the really long detections were extreme anomalies or misinterpretations of joint maneuvers. # # # I'm putting the database query inside the second but not third level of the nested list, to strike the balance between # query size and query count # Size appropriate velocity max is designed to exceed realistic foraging for fish of a given size and then some, while # still eliminating the work of calculating absurdly high velocities. 
max_velocity = 3.0 * fl + 50 # in cm/s and cm fork_lengths = list(np.concatenate([np.arange(3,5,0.2),np.arange(5,10,0.5),np.arange(10,20,1),np.arange(20,57,2),np.arange(58,80,3)])) time_per_number = 6.0 # seconds required to compute one cell in the spreadsheet bytes_per_number = 9.8 # bytes required to store one cell in the spreadsheet numbers_per_sheet = 999 # number of cells in each sheet, based on the resolution max_instances = 25 # max number of virtual machines running calculations queries = [] total_sheets = 0 total_bytes = 0 total_time = 0 all_velocities = list(np.concatenate([np.arange(1,19,2), np.arange(19, 40, 3), np.arange(40, 90, 5), np.arange(90,166,15)])) for fl in fork_lengths: size_appropriate_velocities = [v for v in all_velocities if v < 3.0 * fl + 50] for v in size_appropriate_velocities: total_sheets += 1 total_bytes += bytes_per_number * numbers_per_sheet total_time += time_per_number * numbers_per_sheet queries.append(f"INSERT INTO maneuver_model_tasks (fork_length, velocity) VALUES ({fl:.1f}, {v:.1f})") total_bytes *= 2 # because there are 2 response variables time_per_sheet = total_time / total_sheets real_time = (total_time / 3600) / max_instances print("Total calculation predicted to generate {0} sheets in {1:.1f} cpu-hours ({2:.1f} min/sheet, {4:.1f} hours for {5} instances) taking {3:.1f} mb of space.".format(total_sheets, total_time/3600.0, time_per_sheet/60.0, total_bytes/(1024.0*1024.0), real_time, max_instances)) # Actually generate the to-do list in the database -- ONLY DO THIS ONCE unless I am resetting the whole thing! 
# If I do reset the whole thing, I need to do ALTER TABLE maneuver_model_tasks AUTO_INCREMENT = 1 db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) cursor = db.cursor() for i in range(len(queries)): print("Running query {0} of {1}.".format(i,len(queries))) exq = cursor.execute(queries[i]) db.close() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING ACCURACY WITH WHICH SPLINE PREDICTS DIRECT MODEL PREDICTIONS and optimizing spline parameters #--------------------------------------------------------------------------------------------------------------------------------------------------------- # This code refers to all kinds of global variables from other sheets and will be a nuisance to reuse. It's not really meant for that. Just look at the # commented results below instead. import random # get xs, ys, ec from maneuver_spreadsheet_creation.py spl_ec_665 = RectBivariateSpline(xs, ys, ec) spl_ec_deg5 = RectBivariateSpline(xs, ys, ec, kx=5, ky=5) # kx, ky = spline degree... both 1 and 5 worked worse spl_efc_smooth = RectBivariateSpline(xs, ys, ec, s=2) # s = smoothing... 
looked good on plots but bad for results # worked very poorly with x=-12, y=5 errors_A = [] errors_B = [] for i in range(1,300): # Uniform test throughout the possible reaction distances or an inner subset thereof #distfact=1 #testx = random.uniform(min(xs)/distfact,max(xs)/distfact) #testy = random.uniform(min(ys)/distfact,max(ys)/distfact) # Test weighted to actual reaction distances (in bodylengths) by choosing randomly from real fish data from all_foraging_attempts in Maneuver Paper Calculations.py attempt = random.choice(all_foraging_attempts) testx = fork_length * attempt.reaction_vector[0] / attempt.fish.best_length testy = fork_length * attempt.lateral_reaction_distance / attempt.fish.best_length test_energy_model = optimize.optimal_maneuver(fish, detection_point_3D = (testx, testy, 0.0), popsize=4, variant_scale=1.5, mixing_ratio=3.0, iterations=4500, use_starting_iterations=True, num_starting_populations=12, num_starting_iterations=500).dynamics.activity_cost errors_A.append(100 * abs(spl_ec_665.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in log spline model errors_B.append(100 * abs(spl_ec_416.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in lin spline model print("Mean A (665) error is {0:.2f}, median {1:.2f}, 95th percentile {2:.2f}, max {3:.2f}. Mean B (416) error is {4:.2f}, median {5:.2f}, 95th percentile is {6:.2f}, max is {7:.2f}.".format(np.mean(errors_A), np.median(errors_A), percentile(errors_A,95), max(errors_A), np.mean(errors_B), np.median(errors_B), percentile(errors_B,95), max(errors_B))) # Within the region closest to the fish (distfact=5, inner 20 % of interpolated range), logspline works better: # Mean logspline error is 0.00897633464221, median 0.0044701166147, max 0.0864545485659. Mean linspline error is 0.0399583067863, median 0.0247217810159, max 0.125442996728. 
# For the inner 50% of extrapolated range (distfact=2): # Mean logspline error is 0.0484298747137, median 0.0206530488527, max 0.355140338201. Mean linspline error is 0.0270808114457, median 0.0120931308594, max 0.198272165424. # For the overall extrapolated region: # Mean logspline error is 0.128979262139, median 0.0599612297048, max 0.692983284836. Mean linspline error is 0.0114831437642, median 0.00342595840382, max 0.103078836909. # AFTER changing spline degree to 1 (linear) # distfact = 1 # Mean logspline error is 0.0218975898111, median 0.0113214302154, max 0.138234911992. Mean linspline error is 0.0125631421346, median 0.0035485055496, max 0.140522637627 # distfact = 2 # Mean logspline error is 0.0368812920421, median 0.0152525418311, max 0.198044035521. Mean linspline error is 0.0243640316147, median 0.0150866681404, max 0.147206404992. # distfact = 5 # Mean logspline error is 0.0137129814445, median 0.00836141861284, max 0.105787513153. Mean linspline error is 0.0393639917989, median 0.0210805421877, max 0.176547064472. # NEXT TEST: Increase resolution to 312 pts, and start using percent errors instead of regular errors, cover full realistic ranges # distfact = 1 # Mean logspline error is 6.61230781457, median 2.52211224078, max 81.438706048. Mean linspline error is 2.70886911207, median 0.140402890291, max 44.1715921604. # distfact = 2 # Mean logspline error is 5.13227646421, median 1.11771126799, max 64.1222831046. Mean linspline error is 15.9565636692, median 1.12503901849, max 632.523260832. # distfact = 5 # Mean logspline error is 5.95691325441, median 3.00161632168, max 82.3370962708. Mean linspline error is 8.76483038921, median 3.49808649369, max 84.3056318184. # Now using data from actual fish within the range where they're really doing stuff (N=300 for calculating these stats) # Mean logspline error is 6.22782654855, median 3.72433730964, max 42.6818144461. 
Mean linspline error is 15.4806385555, median 4.82063948279, max 323.389534253 # That is, dare I say, tolerable... but let's see if increasing iterations helps. # Now testing two models with either 312-point or 1248-point grids (both on log scale) used to create the spline # Mean 1248-pt error is 5.32348294783, median 1.49117256539, max 47.2459070847. Mean 312-pt error is 6.28938249563, median 3.69462175484, max 38.4551959213. # Not that much of an improvement. What if instead of increasing resolution so much, we increase iterations to smooth things out? # Using 12x500+4500 (both for extrapolation and test) # Switching from 6x500 to 12x500 for the preliminary solution really didn't help much. # Now trying out smoothing on the interpolation in model 312bs (smoothed at s=2 in RectBivariateSpline) # Mean 312b error is 5.9512430459, median 4.12127444535, max 66.6321313342. Mean 312bs error is 36.5312610831, median 22.2841087968, max 395.585373521. # Okay, so the smoothing looks good on the plots, but it's actually awful. # Next task is to see how much difference spline order makes, model 312bl, with linear instead of cubic spline interpolation: # Mean 312b error is 5.41455284206, median 3.74412497518, max 40.8658187381. Mean 312bl error is 35.3688630676, median 23.6634535005, max 488.033873058. # Okay, so the cubic spline is VASTLY better than linear interpolation. # Next test is with better interpolation grid that isn't so heavily weighted toward zero (starting log scale at 1.0 instead of 0.1, but adding in some short ones) # Mean 416 error is 6.49, median 3.00, 95th percentile 22.91, max 156.98. Mean 312 error is 6.54, median 4.08, max 20.75, 95th percentile 72.64. # Also can't hurt to give quintic splines a try... okay, they're no good. # Mean A (416) error is 5.12, median 2.07, 95th percentile 20.01, max 118.43. Mean B (416q) error is 18.64, median 7.24, max 50.52, 95th percentile 1172.86. 
# Next result comparing 665 (beefing up resolution on the -x and +y quadrant) to the 416 result (which had fairly useless, beefed-up resolution in the +x +y quadrant) # Mean A (665) error is 4.76, median 1.84, 95th percentile 16.38, max 96.79. Mean B (416) error is 5.23, median 2.11, 95th percentile is 18.74, max is 67.20. # Calling that one good enough. import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') fig, (ax1, ax2, ax3) = plt.subplots(1, 3) xnew, ynew = np.ogrid[xmin:xmax:1000j, ymin:ymax:1000j] #meshgrid(xx, yy) znew_a = spl_ec_312(xnew, ynew) znew_b = spl_ec_416(xnew, ynew) znew_c = spl_ec_1248(xnew, ynew) im1 = ax1.imshow(znew_a, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im2 = ax2.imshow(znew_b, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im3 = ax3.imshow(znew_c, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) ax1.set_title('312') ax2.set_title('416') ax3.set_title('1248') plt.show() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING EFFECT OF POINTS USED TO CONSTRUCT SPLINES ON SPLINE CALL SPEED (NO EFFECT) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # The following test, conducted using splines based on 25, 100, or 900 points, shows that the number of points used to construct the spline has # no detectable effect on very short amount of time required to get call the spline function (about 3.5 microseconds). This means the only limiting # factor in the resolution of grid we're using to create the splines is how much computing time & storage space we have to calculate/hold the grids. # Note that the real optimal energy cost directly calculated from the model with tons of iterations is about 0.6127 J. 
#from timeit import timeit #timeit("spl_ec25(-2.55, 16.38)", setup="from __main__ import spl_ec25", number = 100000)/100000 #timeit("spl_ec100(-2.55, 16.38)", setup="from __main__ import spl_ec100", number = 100000)/100000 #timeit("spl_ec900(-2.55, 16.38)", setup="from __main__ import spl_ec900", number = 100000)/100000 #--------------------------------------------------------------------------------------------------------------------------------------------------------- # FINAL INTERPOLATION CODE (FROM CSV FILE) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Note: Using non-default options (for spline degree or smoothing) does not improve interpolation quality, as shown above. def
(file_to_interpolate): filedata = np.genfromtxt(file_to_interpolate, delimiter=',') xs = filedata[1:,0] # In sample data, x should go from -24 to 15 ys = filedata[0,1:-1] # In sample data, y should go from .001 to 18 data = filedata[1:,1:-1] # We have to trim off the last element of each row (nan) because of the trailing comma when saving return RectBivariateSpline(xs, ys, data) def interpolate_maneuver_cost(detection_point_3D, interpolation_function): x, y, z = detection_point_3D R = np.sqrt(y*y + z*z) matrix_2Dfrom3D = np.array([[1,0,0],[0,y/R,z/R],[0,-z/R,y/R]]) # matrix to rotate the 3-D detection point about the x-axis into the x-y plane (xrot, yrot) = matrix_2Dfrom3D.dot(np.array(detection_point_3D))[0:2] # 2-D detection point to use for the model, not yet sign-adjusted yrot = abs(yrot) # flip negative-y values to the positive half of the x-y plane for cost calculations return interpolation_function(xrot, yrot)[0,0] testpt = (-15, 5, 3) interp_ec = maneuver_cost_interpolation("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel/sample_data/interpolation_sample_data_energy_cost.csv") interpolate_maneuver_cost(testpt, interp_ec) from timeit import timeit timeit("interpolate_maneuver_cost(testpt, interp_ec)", setup="from __main__ import interpolate_maneuver_cost, testpt, interp_ec", number = 1000)/1000 # Could use Dill to serialize the interpolations, too # https://stackoverflow.com/questions/23997431/is-there-a-way-to-pickle-a-scipy-interpolate-rbf-object #--------------------------------------------------------------------------------------------------------------------------------------------------------- # TRANSFER THIS CODE TO AMAZON INSTANCES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Had to install ssh-askpass for this to work: https://github.com/theseal/ssh-askpass # Make sure to use the .pem version of the 
keyfile straight from Amazon, not the Putty .ppk version # Also have to chmod 400 the .pem file before it can be used. #import os #ec2_private_key = "'/Users/Jason/Dropbox/Amazon AWS/NeuswangerManeuverModelS3Instances.pem'" #module_folder = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel'" #creation_script = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuver_spreadsheet_creation.py'" #ec2_server_address = "ec2-34-216-120-173.us-west-2.compute.amazonaws.com" # NEED TO FREQUENTLY UPDATE #remote_folder = "~" #os.system("scp -r -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, module_folder, ec2_server_address, remote_folder)) #command2 = "scp -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, creation_script, ec2_server_address, remote_folder) ##print(command2) # WHEN THE SERVER IP CHANGES, NEED TO RE-RUN PRINTED SCP COMMAND IN TERMINAL TO GET AROUND ssh-askpass ERROR #os.system(command2) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CLEAN UP INTERRUPTED PROCESSES #--------------------------------------------------------------------------------------------------------------------------------------------------------- db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) #db = pymysql.connect(host="maneuver-model-tasks.crtfph6ctn2x.us-west-2.rds.amazonaws.com", port=3306, user="manmoduser", passwd="x]%o4g28", db="maneuver_model_tasks", autocommit=True) cursor = db.cursor() cursor.execute("UPDATE maneuver_model_tasks SET start_time = NULL WHERE end_time IS NULL") db.close()
maneuver_cost_interpolation
identifier_name
maneuver_spreadsheet_support.py
import numpy as np import pymysql from maneuvermodel import maneuveringfish, optimize from scipy.interpolate import RectBivariateSpline # had to sys.path.append("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/") when it stopped finding the module #----------------------------------------------------------------------------------------------------------------------- # BASIC SUMMARY #----------------------------------------------------------------------------------------------------------------------- # This file is designed to explore the relevant boundaries for the interpolation tables, erring on the side of making # their coverage comfortably more expansive than needed. It includes justifications for the dimensions of the tables as # well as which tables are available, and it places job descriptions for the table calculator in a database. # Data will eventually be organized by subfolders by fork length and current speed, with files for response variable # (activity cost and pursuit duration) within each. Within those files, rows represent the x direction (first column # being x coordinate labels) and columns the y direction (first row being y labels). Tables are provided for the # positive half of the x-y planes; maneuvers elsewhere ar found by rotating into that plane. # We save activity cost and pursuit duration, which are the relevant numbers to plug into another foraging model, # but this must be done carefully to make sure SMR is also accounted for when calculating net rate of energy intake. # The most relevant "handling time" for a model is pursuit duration, under the assumption that wait time and # the return stage don't count as "handling" because the fish can be detecting and potentially pursuing other prey # during that time. 
Only the pursuit is time lost with regard to other #----------------------------------------------------------------------------------------------------------------------- # MODELING CHOICES, INCLUDING FIXED SETTINGS #----------------------------------------------------------------------------------------------------------------------- # It isn't feasible to precalculate values for every conceivable combination of settings, so we instead choose the most # likely applications and vary only the quantities mentioned in the Basic Summary. # # Wait times are enabled to give accurate energy costs for items detected far upstream. Our paper suggested that # including wait times resulted in a worse fit to real data for the model, but that was in a test assuming the fish # detected the item when it responded to it. We could not know when they actually first detected items or how long they # really waited in such cases. However, from a theoretical standpoint, excluding wait time would result in excessively # high predicted time/energy costs for items detected far upstream, which is probably not the case. To avoid penalizing # more effective prey detection, we have to include wait time. Users wishing to exclude it can use this script to make # their own, net set of tables. # # We use only energy cost and not total cost (including opportunity cost based on NREI), because it would require far # more calculations to account for different levels of possible NREI, and we found that the model excluding opportunity # costs was the best fit our maneuver data from real fish anyway. 
#--------------------------------------------------------------------------------------------------------------------------------------------------------- # BOUNDARY CALCULATIONS FOR THE TABLES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CURRENT SPEED speeds_in_bodylengths = [attempt.mean_current_speed / attempt.fish.best_length for attempt in all_foraging_attempts] max(speeds_in_bodylengths) # 6.83 np.percentile(speeds_in_bodylengths, 99) # 5.48 np.percentile(speeds_in_bodylengths, 95) # 4.04 np.percentile(speeds_in_bodylengths, 50) # 1.46 # Maneuvers at high current speeds in bodylengths/s were not uncommon. # It wouldn't be too abnormal to have a 4 cm Chinook in 20 cm/s water (5 bodylengths) # But it would be very abnormal to have a 50 cm Grayling in 150 cm/s water (3 bodylengths) # # For distance: # The absolute longest was 9.2 bodylengths, next longest 8.8, then 8.1, then a couple dozen. # in the 7-6-5 range. The 95th percentile is 2.9 bodylengths and 99th percentile is 4.9 bodylengths. # We can probably assume some of the really long detections were extreme anomalies or misinterpretations of joint maneuvers. # # # I'm putting the database query inside the second but not third level of the nested list, to strike the balance between # query size and query count # Size appropriate velocity max is designed to exceed realistic foraging for fish of a given size and then some, while # still eliminating the work of calculating absurdly high velocities. 
max_velocity = 3.0 * fl + 50 # in cm/s and cm fork_lengths = list(np.concatenate([np.arange(3,5,0.2),np.arange(5,10,0.5),np.arange(10,20,1),np.arange(20,57,2),np.arange(58,80,3)])) time_per_number = 6.0 # seconds required to compute one cell in the spreadsheet bytes_per_number = 9.8 # bytes required to store one cell in the spreadsheet numbers_per_sheet = 999 # number of cells in each sheet, based on the resolution max_instances = 25 # max number of virtual machines running calculations queries = [] total_sheets = 0 total_bytes = 0 total_time = 0 all_velocities = list(np.concatenate([np.arange(1,19,2), np.arange(19, 40, 3), np.arange(40, 90, 5), np.arange(90,166,15)])) for fl in fork_lengths: size_appropriate_velocities = [v for v in all_velocities if v < 3.0 * fl + 50] for v in size_appropriate_velocities: total_sheets += 1 total_bytes += bytes_per_number * numbers_per_sheet total_time += time_per_number * numbers_per_sheet queries.append(f"INSERT INTO maneuver_model_tasks (fork_length, velocity) VALUES ({fl:.1f}, {v:.1f})") total_bytes *= 2 # because there are 2 response variables time_per_sheet = total_time / total_sheets real_time = (total_time / 3600) / max_instances print("Total calculation predicted to generate {0} sheets in {1:.1f} cpu-hours ({2:.1f} min/sheet, {4:.1f} hours for {5} instances) taking {3:.1f} mb of space.".format(total_sheets, total_time/3600.0, time_per_sheet/60.0, total_bytes/(1024.0*1024.0), real_time, max_instances)) # Actually generate the to-do list in the database -- ONLY DO THIS ONCE unless I am resetting the whole thing! 
# If I do reset the whole thing, I need to do ALTER TABLE maneuver_model_tasks AUTO_INCREMENT = 1 db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) cursor = db.cursor() for i in range(len(queries)): print("Running query {0} of {1}.".format(i,len(queries))) exq = cursor.execute(queries[i]) db.close() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING ACCURACY WITH WHICH SPLINE PREDICTS DIRECT MODEL PREDICTIONS and optimizing spline parameters #--------------------------------------------------------------------------------------------------------------------------------------------------------- # This code refers to all kinds of global variables from other sheets and will be a nuisance to reuse. It's not really meant for that. Just look at the # commented results below instead. import random # get xs, ys, ec from maneuver_spreadsheet_creation.py spl_ec_665 = RectBivariateSpline(xs, ys, ec) spl_ec_deg5 = RectBivariateSpline(xs, ys, ec, kx=5, ky=5) # kx, ky = spline degree... both 1 and 5 worked worse spl_efc_smooth = RectBivariateSpline(xs, ys, ec, s=2) # s = smoothing... 
looked good on plots but bad for results # worked very poorly with x=-12, y=5 errors_A = [] errors_B = [] for i in range(1,300): # Uniform test throughout the possible reaction distances or an inner subset thereof #distfact=1 #testx = random.uniform(min(xs)/distfact,max(xs)/distfact) #testy = random.uniform(min(ys)/distfact,max(ys)/distfact) # Test weighted to actual reaction distances (in bodylengths) by choosing randomly from real fish data from all_foraging_attempts in Maneuver Paper Calculations.py attempt = random.choice(all_foraging_attempts) testx = fork_length * attempt.reaction_vector[0] / attempt.fish.best_length testy = fork_length * attempt.lateral_reaction_distance / attempt.fish.best_length test_energy_model = optimize.optimal_maneuver(fish, detection_point_3D = (testx, testy, 0.0), popsize=4, variant_scale=1.5, mixing_ratio=3.0, iterations=4500, use_starting_iterations=True, num_starting_populations=12, num_starting_iterations=500).dynamics.activity_cost errors_A.append(100 * abs(spl_ec_665.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in log spline model errors_B.append(100 * abs(spl_ec_416.ev(testx, testy) - test_energy_model) / test_energy_model) # percent error in lin spline model print("Mean A (665) error is {0:.2f}, median {1:.2f}, 95th percentile {2:.2f}, max {3:.2f}. Mean B (416) error is {4:.2f}, median {5:.2f}, 95th percentile is {6:.2f}, max is {7:.2f}.".format(np.mean(errors_A), np.median(errors_A), percentile(errors_A,95), max(errors_A), np.mean(errors_B), np.median(errors_B), percentile(errors_B,95), max(errors_B))) # Within the region closest to the fish (distfact=5, inner 20 % of interpolated range), logspline works better: # Mean logspline error is 0.00897633464221, median 0.0044701166147, max 0.0864545485659. Mean linspline error is 0.0399583067863, median 0.0247217810159, max 0.125442996728. 
# For the inner 50% of extrapolated range (distfact=2): # Mean logspline error is 0.0484298747137, median 0.0206530488527, max 0.355140338201. Mean linspline error is 0.0270808114457, median 0.0120931308594, max 0.198272165424. # For the overall extrapolated region: # Mean logspline error is 0.128979262139, median 0.0599612297048, max 0.692983284836. Mean linspline error is 0.0114831437642, median 0.00342595840382, max 0.103078836909. # AFTER changing spline degree to 1 (linear) # distfact = 1 # Mean logspline error is 0.0218975898111, median 0.0113214302154, max 0.138234911992. Mean linspline error is 0.0125631421346, median 0.0035485055496, max 0.140522637627 # distfact = 2 # Mean logspline error is 0.0368812920421, median 0.0152525418311, max 0.198044035521. Mean linspline error is 0.0243640316147, median 0.0150866681404, max 0.147206404992. # distfact = 5 # Mean logspline error is 0.0137129814445, median 0.00836141861284, max 0.105787513153. Mean linspline error is 0.0393639917989, median 0.0210805421877, max 0.176547064472. # NEXT TEST: Increase resolution to 312 pts, and start using percent errors instead of regular errors, cover full realistic ranges # distfact = 1 # Mean logspline error is 6.61230781457, median 2.52211224078, max 81.438706048. Mean linspline error is 2.70886911207, median 0.140402890291, max 44.1715921604. # distfact = 2 # Mean logspline error is 5.13227646421, median 1.11771126799, max 64.1222831046. Mean linspline error is 15.9565636692, median 1.12503901849, max 632.523260832. # distfact = 5 # Mean logspline error is 5.95691325441, median 3.00161632168, max 82.3370962708. Mean linspline error is 8.76483038921, median 3.49808649369, max 84.3056318184. # Now using data from actual fish within the range where they're really doing stuff (N=300 for calculating these stats) # Mean logspline error is 6.22782654855, median 3.72433730964, max 42.6818144461. 
Mean linspline error is 15.4806385555, median 4.82063948279, max 323.389534253 # That is, dare I say, tolerable... but let's see if increasing iterations helps. # Now testing two models with either 312-point or 1248-point grids (both on log scale) used to create the spline # Mean 1248-pt error is 5.32348294783, median 1.49117256539, max 47.2459070847. Mean 312-pt error is 6.28938249563, median 3.69462175484, max 38.4551959213. # Not that much of an improvement. What if instead of increasing resolution so much, we increase iterations to smooth things out? # Using 12x500+4500 (both for extrapolation and test) # Switching from 6x500 to 12x500 for the preliminary solution really didn't help much. # Now trying out smoothing on the interpolation in model 312bs (smoothed at s=2 in RectBivariateSpline) # Mean 312b error is 5.9512430459, median 4.12127444535, max 66.6321313342. Mean 312bs error is 36.5312610831, median 22.2841087968, max 395.585373521. # Okay, so the smoothing looks good on the plots, but it's actually awful. # Next task is to see how much difference spline order makes, model 312bl, with linear instead of cubic spline interpolation: # Mean 312b error is 5.41455284206, median 3.74412497518, max 40.8658187381. Mean 312bl error is 35.3688630676, median 23.6634535005, max 488.033873058. # Okay, so the cubic spline is VASTLY better than linear interpolation. # Next test is with better interpolation grid that isn't so heavily weighted toward zero (starting log scale at 1.0 instead of 0.1, but adding in some short ones) # Mean 416 error is 6.49, median 3.00, 95th percentile 22.91, max 156.98. Mean 312 error is 6.54, median 4.08, max 20.75, 95th percentile 72.64. # Also can't hurt to give quintic splines a try... okay, they're no good. # Mean A (416) error is 5.12, median 2.07, 95th percentile 20.01, max 118.43. Mean B (416q) error is 18.64, median 7.24, max 50.52, 95th percentile 1172.86. 
# Next result comparing 665 (beefing up resolution on the -x and +y quadrant) to the 416 result (which had fairly useless, beefed-up resolution in the +x +y quadrant) # Mean A (665) error is 4.76, median 1.84, 95th percentile 16.38, max 96.79. Mean B (416) error is 5.23, median 2.11, 95th percentile is 18.74, max is 67.20. # Calling that one good enough. import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') fig, (ax1, ax2, ax3) = plt.subplots(1, 3) xnew, ynew = np.ogrid[xmin:xmax:1000j, ymin:ymax:1000j] #meshgrid(xx, yy) znew_a = spl_ec_312(xnew, ynew) znew_b = spl_ec_416(xnew, ynew) znew_c = spl_ec_1248(xnew, ynew) im1 = ax1.imshow(znew_a, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im2 = ax2.imshow(znew_b, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) im3 = ax3.imshow(znew_c, cmap='viridis', extent=(ymin,ymax,xmin,xmax)) ax1.set_title('312') ax2.set_title('416') ax3.set_title('1248') plt.show() #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CHECKING EFFECT OF POINTS USED TO CONSTRUCT SPLINES ON SPLINE CALL SPEED (NO EFFECT) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # The following test, conducted using splines based on 25, 100, or 900 points, shows that the number of points used to construct the spline has # no detectable effect on very short amount of time required to get call the spline function (about 3.5 microseconds). This means the only limiting # factor in the resolution of grid we're using to create the splines is how much computing time & storage space we have to calculate/hold the grids. # Note that the real optimal energy cost directly calculated from the model with tons of iterations is about 0.6127 J. 
#from timeit import timeit #timeit("spl_ec25(-2.55, 16.38)", setup="from __main__ import spl_ec25", number = 100000)/100000 #timeit("spl_ec100(-2.55, 16.38)", setup="from __main__ import spl_ec100", number = 100000)/100000 #timeit("spl_ec900(-2.55, 16.38)", setup="from __main__ import spl_ec900", number = 100000)/100000 #--------------------------------------------------------------------------------------------------------------------------------------------------------- # FINAL INTERPOLATION CODE (FROM CSV FILE) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Note: Using non-default options (for spline degree or smoothing) does not improve interpolation quality, as shown above. def maneuver_cost_interpolation(file_to_interpolate): filedata = np.genfromtxt(file_to_interpolate, delimiter=',') xs = filedata[1:,0] # In sample data, x should go from -24 to 15 ys = filedata[0,1:-1] # In sample data, y should go from .001 to 18 data = filedata[1:,1:-1] # We have to trim off the last element of each row (nan) because of the trailing comma when saving return RectBivariateSpline(xs, ys, data) def interpolate_maneuver_cost(detection_point_3D, interpolation_function): x, y, z = detection_point_3D R = np.sqrt(y*y + z*z) matrix_2Dfrom3D = np.array([[1,0,0],[0,y/R,z/R],[0,-z/R,y/R]]) # matrix to rotate the 3-D detection point about the x-axis into the x-y plane (xrot, yrot) = matrix_2Dfrom3D.dot(np.array(detection_point_3D))[0:2] # 2-D detection point to use for the model, not yet sign-adjusted yrot = abs(yrot) # flip negative-y values to the positive half of the x-y plane for cost calculations return interpolation_function(xrot, yrot)[0,0]
from timeit import timeit timeit("interpolate_maneuver_cost(testpt, interp_ec)", setup="from __main__ import interpolate_maneuver_cost, testpt, interp_ec", number = 1000)/1000 # Could use Dill to serialize the interpolations, too # https://stackoverflow.com/questions/23997431/is-there-a-way-to-pickle-a-scipy-interpolate-rbf-object #--------------------------------------------------------------------------------------------------------------------------------------------------------- # TRANSFER THIS CODE TO AMAZON INSTANCES #--------------------------------------------------------------------------------------------------------------------------------------------------------- # Had to install ssh-askpass for this to work: https://github.com/theseal/ssh-askpass # Make sure to use the .pem version of the keyfile straight from Amazon, not the Putty .ppk version # Also have to chmod 400 the .pem file before it can be used. #import os #ec2_private_key = "'/Users/Jason/Dropbox/Amazon AWS/NeuswangerManeuverModelS3Instances.pem'" #module_folder = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel'" #creation_script = "'/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuver_spreadsheet_creation.py'" #ec2_server_address = "ec2-34-216-120-173.us-west-2.compute.amazonaws.com" # NEED TO FREQUENTLY UPDATE #remote_folder = "~" #os.system("scp -r -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, module_folder, ec2_server_address, remote_folder)) #command2 = "scp -i {0} {1} ec2-user@{2}:{3}".format(ec2_private_key, creation_script, ec2_server_address, remote_folder) ##print(command2) # WHEN THE SERVER IP CHANGES, NEED TO RE-RUN PRINTED SCP COMMAND IN TERMINAL TO GET AROUND ssh-askpass ERROR #os.system(command2) #--------------------------------------------------------------------------------------------------------------------------------------------------------- # CLEAN UP INTERRUPTED PROCESSES 
#--------------------------------------------------------------------------------------------------------------------------------------------------------- db = pymysql.connect(host="troutnut.com", port=3306, user="jasonn5_calibtra", passwd="aVoUgLJyKo926", db="jasonn5_calibration_tracking", autocommit=True) #db = pymysql.connect(host="maneuver-model-tasks.crtfph6ctn2x.us-west-2.rds.amazonaws.com", port=3306, user="manmoduser", passwd="x]%o4g28", db="maneuver_model_tasks", autocommit=True) cursor = db.cursor() cursor.execute("UPDATE maneuver_model_tasks SET start_time = NULL WHERE end_time IS NULL") db.close()
testpt = (-15, 5, 3) interp_ec = maneuver_cost_interpolation("/Users/Jason/Dropbox/Drift Model Project/Calculations/driftmodeldev/maneuvermodel/sample_data/interpolation_sample_data_energy_cost.csv") interpolate_maneuver_cost(testpt, interp_ec)
random_line_split
tools.js
var mod = angular.module("toolsMod", []); mod.controller("toolsCtrl", ["$scope", "$sce", function($scope, $sce) { var DIR = 'images/apps/'; $scope.introText = $sce.trustAsHtml("The DCIC develops web-based tools for integrative data access and visualization across the distributed LINCS and BD2K sites and other relevant data sources. Our next generation integrated web-based platform for the LINCS project serves as the foundation for all LINCS activities and federates all LINCS data, signatures, analysis algorithms, pipelines, APIs and web tools."); $scope.tools = [ { title: "Concierge", description: "Provides multiple points of entry to the LINCS tools and databases.", url: "/getting-started/concierge", target: "_blank", image: DIR + "concierge_bell.svg", shortDesc: "Interactive Introduction to LINCS Tools and Resources" }, { title: "LINCS Data Portal", description: "Features for searching and exploring LINCS dataset packages and reagents.", url: "http://lincsportal.ccs.miami.edu/dcic-portal/", target: "_blank", image: DIR + "portal.png", docsUrl: "", shortDesc: "Access to LINCS Data and Signatures" }, { title: "L1000CDS2", description: "L1000CDS<sup>2</sup> queries gene expression signatures against the LINCS L1000 to identify and prioritize small molecules that can reverse or mimic the observed input expression pattern.", url: "http://amp.pharm.mssm.edu/L1000CDS2/", target: "_blank", image: DIR + "l1000cds2.png", docsUrl: "http://amp.pharm.mssm.edu/L1000CDS2/help/#api", shortDesc: "L1000 Characteristic Direction Signature Search Engine", isHtml: true }, { title: "iLINCS", description: "An integrative web platform for analysis of LINCS data and signatures.", url: "http://www.ilincs.org", target: "_blank", image: DIR + "ilincs.png", docsUrl: "http://www.ilincs.org/ilincs/APIinfo", shortDesc: "LINCS Web Portal" }, { title: "piLINCS", description: "A seamless user interface and intermediate API for accessing LINCS proteomics datasets (P100, GCP, etc.) 
on Panorama.", url: "http://eh3.uc.edu/pilincs", target: "_blank", image: DIR + "pilincs.png", docsUrl: "http://eh3.uc.edu/pilincs/#/api", shortDesc: "Interface to panoramaweb.org" }, { title: "LINCS Joint Project - Breast Cancer Network Browser", description: "LJP-BCNB visualizes thousands of signatures from six breast cancer cell lines treated with ~100 single molecule perturbations, mostly kinase inhibitors.", url: "http://amp.pharm.mssm.edu/LJP/", target: "_blank", image: DIR + "ljp.png", docsUrl: "", shortDesc: "LINCS Joint Project Network Enrichment Tool" }, { title: "L1000FWD", description: "L1000FWD provides interactive visualization of over 16,000 drug and small-molecule induced gene expression signatures.", url: "http://amp.pharm.mssm.edu/L1000FWD/", target: "_blank", image: DIR + "L1000FWD_logo.png", docsUrl: "http://amp.pharm.mssm.edu/l1000fwd/api_page", shortDesc: "L1000 Fireworks Display" }, { title: "DGB", description: "An application for ranking drugs to modulate a specific gene based on transcriptomic signatures.", url: "http://amp.pharm.mssm.edu/DGB/", target: "_blank", image: DIR + "dgb.jpg", shortDesc: "Drug Gene Budger" }, { title: "GRcalculator", description: "Interactive website for calculation, analysis, and visualization of dose-response data using the GR approach.", url: "http://www.grcalculator.org/grtutorial/Home.html", target: "_blank", image: DIR + "GR.png", docsUrl: "", shortDesc: "Tool for Calculating and Mining Dose-Response Data" }, { title: "SEP L1000", description: "SEP L1000 is a web portal to share predicted ADRs.", url: "http://maayanlab.net/SEP-L1000/", target: "_blank", image: DIR + "sep.png", docsUrl: "", shortDesc: "Side Effect Prediction Based on L1000 Data" }, { title: "Slicr", description: "Slicr is a metadata search engine that searches for LINCS L1000 gene expression profiles and signatures matching user's input parameters.", url: "http://amp.pharm.mssm.edu/Slicr", target: "_blank", image: DIR + "slicr2.png", docsUrl: 
"", shortDesc: "LINCS L1000 Slicr [GSE70138 data only]" }, { title: "SynergySeq", description: "Ranking of the LINCS L1000 compounds based on their transcriptional similarity to a reference compound and their reversal of a disease signature.", url: "https://schurerlab.shinyapps.io/synergyseq/", target: "_blank", image: DIR + "synergyseq.jpg", docsUrl: "", shortDesc: "Identify Synergistic Drug Combinations" },
title: "Harmonizome", description: "Built on top of information about genes and proteins from 114 datasets, the Harmonizome is a knowledge engine for a diverse set of integrated resources.", url: "http://amp.pharm.mssm.edu/Harmonizome", target: "_blank", image: DIR + "harmonizome.png", docsUrl: "http://amp.pharm.mssm.edu/Harmonizome/documentation", shortDesc: "Biological Knowledge Engine" }, { title: "Enrichr", description: "An easy to use intuitive enrichment analysis web-based tool providing various types of visualization summaries of collective functions of gene lists.", url: "http://amp.pharm.mssm.edu/Enrichr/", target: "_blank", image: DIR + "enrichr.png", docsUrl: "http://amp.pharm.mssm.edu/Enrichr/help#api", shortDesc: "Search Engine for Gene Lists and Signatures" }, { title: "modEnrichr", description: "A suite of gene set enrichment analysis tools for model organisms.", url: "https://amp.pharm.mssm.edu/modEnrichr/", target: "_blank", image: DIR + "modEnrichr.jpg", docsUrl: "", shortDesc: "Expansion of Enrichr for Four Model Organisms: Fish, Fly, Worm and Yeast" }, { title: "BioJupies", description: "A web application that enables the automated creation, storage, and deployment of Jupyter Notebooks containing RNA-seq data analyses.", url: "https://amp.pharm.mssm.edu/biojupies/", target: "_blank", image: DIR + "biojupies_logo.jpg", shortDesc: "Automated Generation of Interactive Notebooks for RNA-Seq Data Analysis in the Cloud" }, { title: "ARCHS4", description: "A web resource that makes the majority of previously published RNA-seq data from human and mouse freely available at the gene count level.", url: "http://amp.pharm.mssm.edu/archs4", target: "_blank", image: DIR + "archs4_icon_720.png", shortDesc: "All RNA-seq and CHIP-seq Signature Search Space" }, { title: "Geneshot", description: "Submit biomedical terms to receive ranked lists of relevant genes.", url: "https://amp.pharm.mssm.edu/geneshot/", target: "_blank", image: DIR + "geneshot.jpg", docsUrl: 
"https://amp.pharm.mssm.edu/geneshot/api.html", shortDesc: "Search Engine for Ranking Genes from Arbitrary Text Queries" }, { title: "ChEA3", description: "Transcription factor enrichment analysis by orthogonal omics integration.", url: "https://amp.pharm.mssm.edu/ChEA3", target: "_blank", image: DIR + "chea3logo.jpg", docsUrl: "https://amp.pharm.mssm.edu/chea3/index.html#content4-z", shortDesc: "ChIP-X Enrichment Analysis 3" }, { title: "X2K Web", description: "Computationally predicts involvement of upstream cell signaling pathways, given a signature of differentially expressed genes.", url: "http://X2K.cloud", target: "_blank", image: DIR + "expression2kinases.jpg", docsUrl: "https://amp.pharm.mssm.edu/X2K/#api", shortDesc: "eXpression2Kinases Web" }, { title: "Gen3va", description: "Aggregates and analyzes gene expression signatures extracted from GEO by the crowd using GEO2Enrichr.", url: "http://amp.pharm.mssm.edu/gen3va", target: "_blank", image: DIR + "gen3va_logo.png", docsUrl: "http://amp.pharm.mssm.edu/gen3va/documentation#api", shortDesc: "Gene Expression and Enrichment Vector Analyzer" }, { title: "CREEDS", description: "Collections of processed gene, drug and disease signatures from GEO.", url: "http://amp.pharm.mssm.edu/CREEDS/", target: "_blank", image: DIR + "creeds_logo.fw.png", docsUrl: "http://amp.pharm.mssm.edu/CREEDS/#help", shortDesc: "Crowd Extracted Expression of Differential Signatures" }, { title: "Datasets2Tools", description: "A repository indexing 31,473 canned bioinformatics analyses applied to 6,431 datasets.", url: "http://amp.pharm.mssm.edu/datasets2tools", target: "_blank", image: DIR + "d2t_icon_720.png", docsUrl: "http://amp.pharm.mssm.edu/datasets2tools/api", shortDesc: "Repository and Search Engine for Bioinformatics Datasets, Tools and Canned Analyses" }, { title: "GEO2Enrichr", description: "A browser extension and web application to extract gene sets from GEO and analyze these lists for common biological functions.", url: 
"http://amp.pharm.mssm.edu/g2e/", target: "_blank", image: DIR + "g2e.png", docsUrl: "http://amp.pharm.mssm.edu/g2e/documentation", shortDesc: "Differential Expression Analysis Tool" }, { title: "GREIN", description: "An interactive web platform for re-analyzing GEO RNA-seq data.", url: "https://shiny.ilincs.org/grein", target: "_blank", image: DIR + "grein_logo.jpg", docsUrl: "", shortDesc: "GEO RNA-seq Experiments Interactive Navigator" }, { title: "PAEA", description: "PAEA is a new R/Shiny gene set enrichment web application with over 70 gene set libraries available for enrichment analysis.", url: "http://amp.pharm.mssm.edu/PAEA/", target: "_blank", image: DIR + "paea.png", docsUrl: "http://amp.pharm.mssm.edu/PAEA/#api", shortDesc: "Principal Angle Enrichment Analysis" }, //{ // title: "LINCS Information Framework (LIFE)", // description: "Integrates all LINCS content leveraging a semantic knowledge model and common LINCS metadata standards.", // url: "http://life.ccs.miami.edu/life/", // target: "_blank", // image: DIR + "life.png", // docsUrl: "", // shortDesc: "LINCS Information System" //}, { title: "LINCS Canvas Browser", description: "The LINCS Canvas Browser is an interactive web app to query, browse and interrogate LINCS L1000 gene expression signatures.", url: "http://www.maayanlab.net/LINCS/LCB", target: "_blank", image: DIR + "lincs-canvas-browser.png", docsUrl: "http://www.maayanlab.net/LINCS/LCB/LCB-Tutorial-042314.pdf", shortDesc: "LINCS L1000 Clustering, Visualization and Enrichment Analysis Tool" }, { title: "Drug/Cell-line Browser", description: "An online interactive HTML5 data visualization tool for interacting with three of the recently published datasets of cancer cell lines/drug-viability studies.", url: "http://www.maayanlab.net/LINCS/DCB/", target: "_blank", image: DIR + "drug-cell-line-browser.png", docsUrl: "http://www.maayanlab.net/LINCS/DCB/DCB%20Manual.pdf", shortDesc: "Data Visualization Tool" }, { title: "Cite-D-Lite", 
description: "Functions on specific pages of GEO, PubMed, and DataMed. It has two functions: (1) to create downloadable citations for GEO data and PubMed articles and (2) to highlight the most important sentences in PubMed abstracts in a graded manner (based on TextRank algorithm).", url: "https://chrome.google.com/webstore/detail/cite-d-lite/ipiffhgeigmiffclkpkgdaklbdgdegkk", target: "_blank", image: DIR + "cdl_icon_720.png", shortDesc: "Chrome Extension for Data and Paper Citations with Text Importance Highlighting" }, { title: "Network2Canvas", description: "A web application that provides an alternative way to view networks and visualizes them by placing nodes on a square toroidal canvas.", url: "http://www.maayanlab.net/N2C/", target: "_blank", image: DIR + "network2canvas.png", docsUrl: "http://www.maayanlab.net/N2C/help.html", shortDesc: "Network Visualization on a Canvas with Enrichment Analysis" }, { title: "GUIdock", description: "A method for deploying containers with a graphical user interface.", url: "https://github.com/WebDataScience/GUIdock", target: "_blank", image: DIR + "gui_dock.fw.png", docsUrl: "https://github.com/WebDataScience/GUIdock", shortDesc: "Using Docker Containers with a Common Graphics User Interface to Address the Reproducibility of Research" }, ]; //$scope.docentWarning = "*Experiments with content that may not be up to date."; //$scope.lincsVizTools = [ //{ // title: "Docent - Grid view", // description: "Docent's grid view provides two interfaces for searching LINCS data by assay, perturbagen, cell, and readout.", // url: "http://amp.pharm.mssm.edu/milestones/grid.html", // target: "_blank", // image: DIR + "docent-grid.png", // shortDesc: "Searchable overview of the LINCS Consortium's datasets." 
// }, // { // title: "Docent - List view", // description: "Docent's list view provides an interactive matrix of the most studied cell lines by assay.", // url: "http://amp.pharm.mssm.edu/milestones/product.html", // target: "_blank", // image: DIR + "docent-list.png", // shortDesc: "Overview of the LINCS Consortium's datasets by assay and cell type." // }, // { // title: "Docent - Card view", // description: "Docent's card view provides an interactive matrix of the most studied cell lines by assay.", // url: "http://amp.pharm.mssm.edu/milestones/cards.html", // target: "_blank", // image: DIR + "docent-card.png", // shortDesc: "Overview of the LINCS Consortium's datasets by assay and cell type." // } // ]; // $scope.lincsVizToolsArchived = [ // { // title: "Docent I", // description: "Docent provides a quantified, interactive view of the biological entities such as cell lines, small molecules, and assays within LINCS data.", // url: "http://amp.pharm.mssm.edu/public/docent/", // target: "_blank", // image: DIR + "docent-i.png", // shortDesc: "Overview guide into LINCS data" // }, // { // title: "Docent II", // description: "Docent II provides an interactive plot of available and upcoming data from the LINCS DSGCs.", // url: "http://amp.pharm.mssm.edu/milestonesViz/", // target: "_blank", // image: DIR + "docent-ii.png", // shortDesc: "Overview of the LINCS Data and Signature Generation Centers (DSGCs) Data Release Milestones" // } // ]; }]);
{
random_line_split
test_pessismistic_multi_tables.go
package main import ( "context" "database/sql" "errors" "flag" "fmt" "log" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" _ "github.com/go-sql-driver/mysql" "modernc.org/mathutil" ) var ( addr = flag.String("addr", "127.0.0.1:4000", "tidb address") concurrency = flag.Int("concurrency", 32, "concurrency") loadData = flag.Bool("load-data", false, "load data before run") tableSize = flag.Uint64("table-size", 400000, "table size") ignoreO = flag.String("ignore-o", "9007,1105", "ignored error code for optimistic transaction, separated by comma") ignoreP = flag.String("ignore-p", "1213", "ignored error code for pessimistic transaction, separated by comma") mode = flag.String("mode", "mix", "transaction mode, mix|pessimistic|optimistic") tables = flag.Int("tables", 4, "number of test tables") insertDelete = flag.Bool("insert-delete", false, "run insert delete transactions") ignoreCodesO []int ignoreCodesP []int successTxn uint64 failTxn uint64 ) const numPartitions = 4 func main() { flag.Parse() parts := strings.Split(*ignoreP, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesP = append(ignoreCodesP, iv) } parts = strings.Split(*ignoreO, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesO = append(ignoreCodesO, iv) } db, err := sql.Open("mysql", "root:@tcp("+*addr+")/test") if err != nil { log.Fatal(err) } if *loadData { err = LoadData(db, *tableSize) if err != nil { log.Fatal(err) } return } wg := new(sync.WaitGroup) wg.Add(*concurrency) for i := 0; i < *concurrency; i++ { se, err := NewSession(db, uint64(i), *tableSize, numPartitions) if err != nil { log.Fatal(err) } go se.Run(wg) } if !*insertDelete { go checkLoop(db) } go statsLoop() wg.Wait() } const batchSize = 100 func LoadData(db *sql.DB, maxSize uint64) error { for i := 0; i < *tables; i++ { tableName := fmt.Sprintf("t%d", i) log.Printf("loading table: %s\n", tableName) if _, err := db.Exec(fmt.Sprintf("drop table if exists %s", tableName)); err != nil { 
return nil } createTableStmt := fmt.Sprintf("create table %s ("+ "id bigint primary key,"+ "u bigint unsigned unique,"+ "i bigint, c bigint,"+ "index i (i))", tableName) if _, err := db.Exec(createTableStmt); err != nil { return err } for i := uint64(0); i < maxSize; i += batchSize { if _, err := db.Exec(insertSql(tableName, i)); err != nil { return err } } } return nil } func insertSql(tableName string, beginID uint64) string { var values []string for i := beginID; i < beginID+batchSize; i++ { value := fmt.Sprintf("(%d, %d, %d, %d)", i, i, i, 0) values = append(values, value) } return fmt.Sprintf("insert %s values %s", tableName, strings.Join(values, ",")) } type Session struct { seID uint64 isPessimistic bool conn *sql.Conn stmts []func(ctx context.Context) error ran *randIDGenerator addedCount int txnStart time.Time commitStart time.Time } func NewSession(db *sql.DB, seID, maxSize uint64, numPartitions uint64) (*Session, error) { ctx := context.Background() con, err := db.Conn(ctx) if err != nil { return nil, err } se := &Session{ seID: seID, conn: con, ran: newRandIDGenerator(maxSize, numPartitions), } switch *mode { case "pessimistic": se.isPessimistic = true case "mix": se.isPessimistic = se.seID%2 == 1 } se.stmts = append(se.stmts, se.updateIndex, se.updateIndexRange, se.updateRange, se.updateUniqueIndex, se.replace, se.deleteInsert, se.selectForUpdate, se.plainSelect, ) return se, nil } func (se *Session) runTransaction(parent context.Context) error
func getErrorCode(err error) int { var code int _, err1 := fmt.Sscanf(err.Error(), "Error %d:", &code) if err1 != nil { return -1 } return code } func (se *Session) handleError(ctx context.Context, err error, isCommit bool) { atomic.AddUint64(&failTxn, 1) _, _ = se.conn.ExecContext(ctx, "rollback") code := getErrorCode(err) ignoreCodes := ignoreCodesO if se.isPessimistic { ignoreCodes = ignoreCodesP } for _, ignoreCode := range ignoreCodes { if ignoreCode == code { return } } txnMode := "optimistic" if se.isPessimistic { txnMode = "pessimistic" } if isCommit { log.Println(txnMode, "txnDur", time.Since(se.txnStart), "commitDur", time.Since(se.commitStart), err) } else { log.Println(txnMode, se.isPessimistic, "txnDur", time.Since(se.txnStart), err) } } func (se *Session) Run(wg *sync.WaitGroup) { defer wg.Done() ctx := context.Background() runFunc := se.runTransaction for { if err := runFunc(ctx); err != nil { log.Println("begin error", err) return } } } func (se *Session) deleteInsert(ctx context.Context) error { return nil // TODO: add deleteInsert for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //if err = se.executeDML(ctx, "delete from t where id = %d", rowID); err != nil { // return err //} //return se.executeDML(ctx, "insert t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) replace(ctx context.Context) error { return nil // TODO: add replace for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //// When replace on existing records, the semantic is equal to `delete then insert`, so the cnt //return se.executeDML(ctx, "replace t values (%d, %d, %d, %d)", // rowID, 
se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) updateSimple(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id = %d", toTableID, toRowID) } func (se *Session) updateIndex(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id = %d", toTableID, toRowID) } // updateUniqueIndex make sure there is no conflict on the unique index by randomly generate the unique index value func (se *Session) updateUniqueIndex(ctx context.Context) error { return se.executeDML(ctx, "update t%d set u = %d, c = c where id = %d", se.ran.nextTableID(), se.ran.nextUniqueIndex(), se.ran.nextRowID()) } func (se *Session) updateRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id between %d and %d", toTableID, beginRowID, beginRowID+8) } func (se *Session) updateIndexRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id between %d and %d", 
toTableID, beginRowID, beginRowID+8) } func (se *Session) plainSelect(ctx context.Context) error { tableID := se.ran.nextTableID() beginRowID := se.ran.nextRowID() endRowID := beginRowID + 10 return se.executeSelect(ctx, "select * from t%d where id between %d and %d", tableID, beginRowID, endRowID) } func (se *Session) selectForUpdate(ctx context.Context) error { return se.executeSelect(ctx, "select * from t%d where id in (%d, %d) for update", se.ran.nextTableID(), se.ran.nextRowID(), se.ran.nextRowID()) } func (se *Session) executeDML(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) res, err := se.conn.ExecContext(ctx, sql) if err != nil { return err } affected, err := res.RowsAffected() if err != nil { return err } if affected == 0 { return errors.New("affected row is 0, " + sql) } se.addedCount += int(affected) return nil } func (se *Session) executeSelect(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) rows, err := se.conn.QueryContext(ctx, sql) if err != nil { return err } for rows.Next() { } if rows.Err() != nil { return rows.Err() } return rows.Close() } func (se *Session) executeCommit(ctx context.Context) error { _, err := se.conn.ExecContext(ctx, "commit") return err } func (se *Session) reset() { se.ran.allocated = map[uint64]struct{}{} se.addedCount = 0 se.txnStart = time.Now() } // randIDGenerator generates random ID that combines round-robin and zipf distribution and make sure not duplicated. 
type randIDGenerator struct { allocated map[uint64]struct{} zipf *rand.Zipf uniform *rand.Rand tableNum int max uint64 partitionSize uint64 numPartitions uint64 partitionIdx uint64 } func (r *randIDGenerator) nextTableAndRowPairs() (fromTableID, toTableID int, fromRowID, toRowID uint64) { fromTableID = r.nextTableID() toTableID = r.nextTableID() fromRowID = r.nextRowID() toRowID = r.nextRowID() return } func (r *randIDGenerator) nextTableID() int { return r.uniform.Intn(r.tableNum) } func (r *randIDGenerator) nextRowID() uint64 { return r.uniform.Uint64() % *tableSize } func (r *randIDGenerator) reset() { r.allocated = map[uint64]struct{}{} } func (r *randIDGenerator) nextNumStatements(n int) int { return r.uniform.Intn(n) } func (r *randIDGenerator) nextUniqueIndex() uint64 { return r.uniform.Uint64() } func newRandIDGenerator(maxSize uint64, numPartitions uint64) *randIDGenerator { partitionSize := maxSize / numPartitions src := rand.NewSource(time.Now().UnixNano()) ran := rand.New(src) zipf := rand.NewZipf(ran, 1.01, 1, partitionSize-1) return &randIDGenerator{ allocated: map[uint64]struct{}{}, zipf: zipf, uniform: ran, max: maxSize, tableNum: *tables, partitionSize: maxSize / numPartitions, numPartitions: numPartitions, } } func statsLoop() { ticker := time.NewTicker(time.Second * 10) lastSuccess := uint64(0) lastFail := uint64(0) for { <-ticker.C curSuccess := atomic.LoadUint64(&successTxn) curFail := atomic.LoadUint64(&failTxn) log.Printf("tps(success:%v fail:%v)\n", float64(curSuccess-lastSuccess)/10, float64(curFail-lastFail)/10) lastSuccess = curSuccess lastFail = curFail } } func checkLoop(db *sql.DB) { ctx := context.Background() conn, err := db.Conn(ctx) if err != nil { panic(err) } ticker := time.NewTicker(time.Second * 30) tableNames := make([]string, *tables) for i := 0; i < *tables; i++ { tableNames[i] = fmt.Sprintf("select c from t%d", i) } allSumStmt := fmt.Sprintf("select sum(c) from (%s) tall", strings.Join(tableNames, " union all ")) for { 
<-ticker.C checkCount(conn, allSumStmt, 0) for i := 0; i < *tables; i++ { checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (i)", i), int64(*tableSize)) checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (u)", i), int64(*tableSize)) } } } func checkCount(conn *sql.Conn, sql string, expected int64) { row := conn.QueryRowContext(context.Background(), sql) var c int64 if err := row.Scan(&c); err != nil { log.Println("check", sql, err) } else { if c != expected { panic(fmt.Sprintf("data inconsistency, %s is %d, expecte %d", sql, c, expected)) } } }
{ se.reset() ctx, cancel := context.WithTimeout(parent, time.Minute) defer cancel() beginSQL := "begin /*!90000 optimistic */" if se.isPessimistic { beginSQL = "begin /*!90000 pessimistic */" } _, err := se.conn.ExecContext(ctx, beginSQL) if err != nil { return err } numStmts := 1 + se.ran.uniform.Intn(5) for i := 0; i < numStmts; i++ { stmtType := se.ran.uniform.Intn(len(se.stmts)) f := se.stmts[stmtType] err = f(ctx) if err != nil { se.handleError(ctx, err, false) return nil } } se.commitStart = time.Now() _, err = se.conn.ExecContext(ctx, "commit") if err != nil { se.handleError(ctx, err, true) } else { atomic.AddUint64(&successTxn, 1) } return nil }
identifier_body
test_pessismistic_multi_tables.go
package main import ( "context" "database/sql" "errors" "flag" "fmt" "log" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" _ "github.com/go-sql-driver/mysql" "modernc.org/mathutil" ) var ( addr = flag.String("addr", "127.0.0.1:4000", "tidb address") concurrency = flag.Int("concurrency", 32, "concurrency") loadData = flag.Bool("load-data", false, "load data before run") tableSize = flag.Uint64("table-size", 400000, "table size") ignoreO = flag.String("ignore-o", "9007,1105", "ignored error code for optimistic transaction, separated by comma") ignoreP = flag.String("ignore-p", "1213", "ignored error code for pessimistic transaction, separated by comma") mode = flag.String("mode", "mix", "transaction mode, mix|pessimistic|optimistic") tables = flag.Int("tables", 4, "number of test tables") insertDelete = flag.Bool("insert-delete", false, "run insert delete transactions") ignoreCodesO []int ignoreCodesP []int successTxn uint64 failTxn uint64 ) const numPartitions = 4 func main() { flag.Parse() parts := strings.Split(*ignoreP, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesP = append(ignoreCodesP, iv) } parts = strings.Split(*ignoreO, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesO = append(ignoreCodesO, iv) } db, err := sql.Open("mysql", "root:@tcp("+*addr+")/test") if err != nil { log.Fatal(err) } if *loadData { err = LoadData(db, *tableSize) if err != nil { log.Fatal(err) } return } wg := new(sync.WaitGroup) wg.Add(*concurrency) for i := 0; i < *concurrency; i++ { se, err := NewSession(db, uint64(i), *tableSize, numPartitions) if err != nil { log.Fatal(err) } go se.Run(wg) } if !*insertDelete { go checkLoop(db) } go statsLoop() wg.Wait() } const batchSize = 100 func LoadData(db *sql.DB, maxSize uint64) error { for i := 0; i < *tables; i++ { tableName := fmt.Sprintf("t%d", i) log.Printf("loading table: %s\n", tableName) if _, err := db.Exec(fmt.Sprintf("drop table if exists %s", tableName)); err != nil { 
return nil } createTableStmt := fmt.Sprintf("create table %s ("+ "id bigint primary key,"+ "u bigint unsigned unique,"+ "i bigint, c bigint,"+ "index i (i))", tableName) if _, err := db.Exec(createTableStmt); err != nil { return err } for i := uint64(0); i < maxSize; i += batchSize { if _, err := db.Exec(insertSql(tableName, i)); err != nil { return err } } } return nil } func insertSql(tableName string, beginID uint64) string { var values []string for i := beginID; i < beginID+batchSize; i++ { value := fmt.Sprintf("(%d, %d, %d, %d)", i, i, i, 0) values = append(values, value) } return fmt.Sprintf("insert %s values %s", tableName, strings.Join(values, ",")) } type Session struct { seID uint64 isPessimistic bool conn *sql.Conn stmts []func(ctx context.Context) error ran *randIDGenerator addedCount int txnStart time.Time commitStart time.Time } func NewSession(db *sql.DB, seID, maxSize uint64, numPartitions uint64) (*Session, error) { ctx := context.Background() con, err := db.Conn(ctx) if err != nil { return nil, err } se := &Session{ seID: seID, conn: con, ran: newRandIDGenerator(maxSize, numPartitions), } switch *mode { case "pessimistic": se.isPessimistic = true case "mix": se.isPessimistic = se.seID%2 == 1 } se.stmts = append(se.stmts, se.updateIndex, se.updateIndexRange, se.updateRange, se.updateUniqueIndex, se.replace, se.deleteInsert, se.selectForUpdate, se.plainSelect, ) return se, nil } func (se *Session) runTransaction(parent context.Context) error { se.reset() ctx, cancel := context.WithTimeout(parent, time.Minute) defer cancel() beginSQL := "begin /*!90000 optimistic */" if se.isPessimistic { beginSQL = "begin /*!90000 pessimistic */" } _, err := se.conn.ExecContext(ctx, beginSQL) if err != nil { return err } numStmts := 1 + se.ran.uniform.Intn(5) for i := 0; i < numStmts; i++ { stmtType := se.ran.uniform.Intn(len(se.stmts)) f := se.stmts[stmtType] err = f(ctx) if err != nil { se.handleError(ctx, err, false) return nil } } se.commitStart = time.Now() _, 
err = se.conn.ExecContext(ctx, "commit") if err != nil { se.handleError(ctx, err, true) } else { atomic.AddUint64(&successTxn, 1) } return nil } func getErrorCode(err error) int { var code int _, err1 := fmt.Sscanf(err.Error(), "Error %d:", &code) if err1 != nil { return -1 } return code } func (se *Session) handleError(ctx context.Context, err error, isCommit bool) { atomic.AddUint64(&failTxn, 1) _, _ = se.conn.ExecContext(ctx, "rollback") code := getErrorCode(err) ignoreCodes := ignoreCodesO if se.isPessimistic { ignoreCodes = ignoreCodesP } for _, ignoreCode := range ignoreCodes { if ignoreCode == code { return } } txnMode := "optimistic" if se.isPessimistic { txnMode = "pessimistic" } if isCommit { log.Println(txnMode, "txnDur", time.Since(se.txnStart), "commitDur", time.Since(se.commitStart), err) } else { log.Println(txnMode, se.isPessimistic, "txnDur", time.Since(se.txnStart), err) } } func (se *Session) Run(wg *sync.WaitGroup) { defer wg.Done() ctx := context.Background() runFunc := se.runTransaction for { if err := runFunc(ctx); err != nil { log.Println("begin error", err) return } } } func (se *Session) deleteInsert(ctx context.Context) error { return nil // TODO: add deleteInsert for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //if err = se.executeDML(ctx, "delete from t where id = %d", rowID); err != nil { // return err //} //return se.executeDML(ctx, "insert t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) replace(ctx context.Context) error { return nil // TODO: add replace for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //// When replace on existing 
records, the semantic is equal to `delete then insert`, so the cnt //return se.executeDML(ctx, "replace t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) updateSimple(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id = %d", toTableID, toRowID) } func (se *Session) updateIndex(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id = %d", toTableID, toRowID) } // updateUniqueIndex make sure there is no conflict on the unique index by randomly generate the unique index value func (se *Session) updateUniqueIndex(ctx context.Context) error { return se.executeDML(ctx, "update t%d set u = %d, c = c where id = %d", se.ran.nextTableID(), se.ran.nextUniqueIndex(), se.ran.nextRowID()) } func (se *Session) updateRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id between %d and %d", toTableID, beginRowID, beginRowID+8) } func (se *Session) updateIndexRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id between %d and %d", fromTableID, 
beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id between %d and %d", toTableID, beginRowID, beginRowID+8) } func (se *Session) plainSelect(ctx context.Context) error { tableID := se.ran.nextTableID() beginRowID := se.ran.nextRowID() endRowID := beginRowID + 10 return se.executeSelect(ctx, "select * from t%d where id between %d and %d", tableID, beginRowID, endRowID) } func (se *Session) selectForUpdate(ctx context.Context) error { return se.executeSelect(ctx, "select * from t%d where id in (%d, %d) for update", se.ran.nextTableID(), se.ran.nextRowID(), se.ran.nextRowID()) } func (se *Session) executeDML(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) res, err := se.conn.ExecContext(ctx, sql) if err != nil { return err } affected, err := res.RowsAffected() if err != nil { return err } if affected == 0 { return errors.New("affected row is 0, " + sql) } se.addedCount += int(affected) return nil } func (se *Session) executeSelect(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) rows, err := se.conn.QueryContext(ctx, sql) if err != nil { return err } for rows.Next() { } if rows.Err() != nil { return rows.Err() } return rows.Close() } func (se *Session) executeCommit(ctx context.Context) error { _, err := se.conn.ExecContext(ctx, "commit") return err } func (se *Session) reset() { se.ran.allocated = map[uint64]struct{}{} se.addedCount = 0 se.txnStart = time.Now() } // randIDGenerator generates random ID that combines round-robin and zipf distribution and make sure not duplicated. 
type randIDGenerator struct { allocated map[uint64]struct{} zipf *rand.Zipf uniform *rand.Rand tableNum int max uint64 partitionSize uint64 numPartitions uint64 partitionIdx uint64 } func (r *randIDGenerator) nextTableAndRowPairs() (fromTableID, toTableID int, fromRowID, toRowID uint64) { fromTableID = r.nextTableID() toTableID = r.nextTableID() fromRowID = r.nextRowID() toRowID = r.nextRowID() return } func (r *randIDGenerator) nextTableID() int { return r.uniform.Intn(r.tableNum) } func (r *randIDGenerator) nextRowID() uint64 { return r.uniform.Uint64() % *tableSize } func (r *randIDGenerator) reset() { r.allocated = map[uint64]struct{}{} } func (r *randIDGenerator) nextNumStatements(n int) int { return r.uniform.Intn(n) } func (r *randIDGenerator) nextUniqueIndex() uint64 { return r.uniform.Uint64() } func newRandIDGenerator(maxSize uint64, numPartitions uint64) *randIDGenerator { partitionSize := maxSize / numPartitions src := rand.NewSource(time.Now().UnixNano()) ran := rand.New(src) zipf := rand.NewZipf(ran, 1.01, 1, partitionSize-1) return &randIDGenerator{ allocated: map[uint64]struct{}{}, zipf: zipf, uniform: ran, max: maxSize, tableNum: *tables, partitionSize: maxSize / numPartitions, numPartitions: numPartitions, } } func
() { ticker := time.NewTicker(time.Second * 10) lastSuccess := uint64(0) lastFail := uint64(0) for { <-ticker.C curSuccess := atomic.LoadUint64(&successTxn) curFail := atomic.LoadUint64(&failTxn) log.Printf("tps(success:%v fail:%v)\n", float64(curSuccess-lastSuccess)/10, float64(curFail-lastFail)/10) lastSuccess = curSuccess lastFail = curFail } } func checkLoop(db *sql.DB) { ctx := context.Background() conn, err := db.Conn(ctx) if err != nil { panic(err) } ticker := time.NewTicker(time.Second * 30) tableNames := make([]string, *tables) for i := 0; i < *tables; i++ { tableNames[i] = fmt.Sprintf("select c from t%d", i) } allSumStmt := fmt.Sprintf("select sum(c) from (%s) tall", strings.Join(tableNames, " union all ")) for { <-ticker.C checkCount(conn, allSumStmt, 0) for i := 0; i < *tables; i++ { checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (i)", i), int64(*tableSize)) checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (u)", i), int64(*tableSize)) } } } func checkCount(conn *sql.Conn, sql string, expected int64) { row := conn.QueryRowContext(context.Background(), sql) var c int64 if err := row.Scan(&c); err != nil { log.Println("check", sql, err) } else { if c != expected { panic(fmt.Sprintf("data inconsistency, %s is %d, expecte %d", sql, c, expected)) } } }
statsLoop
identifier_name
test_pessismistic_multi_tables.go
package main import ( "context" "database/sql" "errors" "flag" "fmt" "log" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" _ "github.com/go-sql-driver/mysql" "modernc.org/mathutil" ) var ( addr = flag.String("addr", "127.0.0.1:4000", "tidb address") concurrency = flag.Int("concurrency", 32, "concurrency") loadData = flag.Bool("load-data", false, "load data before run") tableSize = flag.Uint64("table-size", 400000, "table size") ignoreO = flag.String("ignore-o", "9007,1105", "ignored error code for optimistic transaction, separated by comma") ignoreP = flag.String("ignore-p", "1213", "ignored error code for pessimistic transaction, separated by comma") mode = flag.String("mode", "mix", "transaction mode, mix|pessimistic|optimistic") tables = flag.Int("tables", 4, "number of test tables") insertDelete = flag.Bool("insert-delete", false, "run insert delete transactions") ignoreCodesO []int ignoreCodesP []int successTxn uint64 failTxn uint64 ) const numPartitions = 4 func main() { flag.Parse() parts := strings.Split(*ignoreP, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesP = append(ignoreCodesP, iv) } parts = strings.Split(*ignoreO, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesO = append(ignoreCodesO, iv) } db, err := sql.Open("mysql", "root:@tcp("+*addr+")/test") if err != nil { log.Fatal(err) } if *loadData { err = LoadData(db, *tableSize) if err != nil { log.Fatal(err) } return } wg := new(sync.WaitGroup) wg.Add(*concurrency) for i := 0; i < *concurrency; i++
if !*insertDelete { go checkLoop(db) } go statsLoop() wg.Wait() } const batchSize = 100 func LoadData(db *sql.DB, maxSize uint64) error { for i := 0; i < *tables; i++ { tableName := fmt.Sprintf("t%d", i) log.Printf("loading table: %s\n", tableName) if _, err := db.Exec(fmt.Sprintf("drop table if exists %s", tableName)); err != nil { return nil } createTableStmt := fmt.Sprintf("create table %s ("+ "id bigint primary key,"+ "u bigint unsigned unique,"+ "i bigint, c bigint,"+ "index i (i))", tableName) if _, err := db.Exec(createTableStmt); err != nil { return err } for i := uint64(0); i < maxSize; i += batchSize { if _, err := db.Exec(insertSql(tableName, i)); err != nil { return err } } } return nil } func insertSql(tableName string, beginID uint64) string { var values []string for i := beginID; i < beginID+batchSize; i++ { value := fmt.Sprintf("(%d, %d, %d, %d)", i, i, i, 0) values = append(values, value) } return fmt.Sprintf("insert %s values %s", tableName, strings.Join(values, ",")) } type Session struct { seID uint64 isPessimistic bool conn *sql.Conn stmts []func(ctx context.Context) error ran *randIDGenerator addedCount int txnStart time.Time commitStart time.Time } func NewSession(db *sql.DB, seID, maxSize uint64, numPartitions uint64) (*Session, error) { ctx := context.Background() con, err := db.Conn(ctx) if err != nil { return nil, err } se := &Session{ seID: seID, conn: con, ran: newRandIDGenerator(maxSize, numPartitions), } switch *mode { case "pessimistic": se.isPessimistic = true case "mix": se.isPessimistic = se.seID%2 == 1 } se.stmts = append(se.stmts, se.updateIndex, se.updateIndexRange, se.updateRange, se.updateUniqueIndex, se.replace, se.deleteInsert, se.selectForUpdate, se.plainSelect, ) return se, nil } func (se *Session) runTransaction(parent context.Context) error { se.reset() ctx, cancel := context.WithTimeout(parent, time.Minute) defer cancel() beginSQL := "begin /*!90000 optimistic */" if se.isPessimistic { beginSQL = "begin /*!90000 
pessimistic */" } _, err := se.conn.ExecContext(ctx, beginSQL) if err != nil { return err } numStmts := 1 + se.ran.uniform.Intn(5) for i := 0; i < numStmts; i++ { stmtType := se.ran.uniform.Intn(len(se.stmts)) f := se.stmts[stmtType] err = f(ctx) if err != nil { se.handleError(ctx, err, false) return nil } } se.commitStart = time.Now() _, err = se.conn.ExecContext(ctx, "commit") if err != nil { se.handleError(ctx, err, true) } else { atomic.AddUint64(&successTxn, 1) } return nil } func getErrorCode(err error) int { var code int _, err1 := fmt.Sscanf(err.Error(), "Error %d:", &code) if err1 != nil { return -1 } return code } func (se *Session) handleError(ctx context.Context, err error, isCommit bool) { atomic.AddUint64(&failTxn, 1) _, _ = se.conn.ExecContext(ctx, "rollback") code := getErrorCode(err) ignoreCodes := ignoreCodesO if se.isPessimistic { ignoreCodes = ignoreCodesP } for _, ignoreCode := range ignoreCodes { if ignoreCode == code { return } } txnMode := "optimistic" if se.isPessimistic { txnMode = "pessimistic" } if isCommit { log.Println(txnMode, "txnDur", time.Since(se.txnStart), "commitDur", time.Since(se.commitStart), err) } else { log.Println(txnMode, se.isPessimistic, "txnDur", time.Since(se.txnStart), err) } } func (se *Session) Run(wg *sync.WaitGroup) { defer wg.Done() ctx := context.Background() runFunc := se.runTransaction for { if err := runFunc(ctx); err != nil { log.Println("begin error", err) return } } } func (se *Session) deleteInsert(ctx context.Context) error { return nil // TODO: add deleteInsert for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //if err = se.executeDML(ctx, "delete from t where id = %d", rowID); err != nil { // return err //} //return se.executeDML(ctx, "insert t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func 
(se *Session) replace(ctx context.Context) error { return nil // TODO: add replace for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //// When replace on existing records, the semantic is equal to `delete then insert`, so the cnt //return se.executeDML(ctx, "replace t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) updateSimple(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id = %d", toTableID, toRowID) } func (se *Session) updateIndex(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id = %d", toTableID, toRowID) } // updateUniqueIndex make sure there is no conflict on the unique index by randomly generate the unique index value func (se *Session) updateUniqueIndex(ctx context.Context) error { return se.executeDML(ctx, "update t%d set u = %d, c = c where id = %d", se.ran.nextTableID(), se.ran.nextUniqueIndex(), se.ran.nextRowID()) } func (se *Session) updateRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id between %d and %d", 
toTableID, beginRowID, beginRowID+8) } func (se *Session) updateIndexRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id between %d and %d", toTableID, beginRowID, beginRowID+8) } func (se *Session) plainSelect(ctx context.Context) error { tableID := se.ran.nextTableID() beginRowID := se.ran.nextRowID() endRowID := beginRowID + 10 return se.executeSelect(ctx, "select * from t%d where id between %d and %d", tableID, beginRowID, endRowID) } func (se *Session) selectForUpdate(ctx context.Context) error { return se.executeSelect(ctx, "select * from t%d where id in (%d, %d) for update", se.ran.nextTableID(), se.ran.nextRowID(), se.ran.nextRowID()) } func (se *Session) executeDML(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) res, err := se.conn.ExecContext(ctx, sql) if err != nil { return err } affected, err := res.RowsAffected() if err != nil { return err } if affected == 0 { return errors.New("affected row is 0, " + sql) } se.addedCount += int(affected) return nil } func (se *Session) executeSelect(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) 
rows, err := se.conn.QueryContext(ctx, sql) if err != nil { return err } for rows.Next() { } if rows.Err() != nil { return rows.Err() } return rows.Close() } func (se *Session) executeCommit(ctx context.Context) error { _, err := se.conn.ExecContext(ctx, "commit") return err } func (se *Session) reset() { se.ran.allocated = map[uint64]struct{}{} se.addedCount = 0 se.txnStart = time.Now() } // randIDGenerator generates random ID that combines round-robin and zipf distribution and make sure not duplicated. type randIDGenerator struct { allocated map[uint64]struct{} zipf *rand.Zipf uniform *rand.Rand tableNum int max uint64 partitionSize uint64 numPartitions uint64 partitionIdx uint64 } func (r *randIDGenerator) nextTableAndRowPairs() (fromTableID, toTableID int, fromRowID, toRowID uint64) { fromTableID = r.nextTableID() toTableID = r.nextTableID() fromRowID = r.nextRowID() toRowID = r.nextRowID() return } func (r *randIDGenerator) nextTableID() int { return r.uniform.Intn(r.tableNum) } func (r *randIDGenerator) nextRowID() uint64 { return r.uniform.Uint64() % *tableSize } func (r *randIDGenerator) reset() { r.allocated = map[uint64]struct{}{} } func (r *randIDGenerator) nextNumStatements(n int) int { return r.uniform.Intn(n) } func (r *randIDGenerator) nextUniqueIndex() uint64 { return r.uniform.Uint64() } func newRandIDGenerator(maxSize uint64, numPartitions uint64) *randIDGenerator { partitionSize := maxSize / numPartitions src := rand.NewSource(time.Now().UnixNano()) ran := rand.New(src) zipf := rand.NewZipf(ran, 1.01, 1, partitionSize-1) return &randIDGenerator{ allocated: map[uint64]struct{}{}, zipf: zipf, uniform: ran, max: maxSize, tableNum: *tables, partitionSize: maxSize / numPartitions, numPartitions: numPartitions, } } func statsLoop() { ticker := time.NewTicker(time.Second * 10) lastSuccess := uint64(0) lastFail := uint64(0) for { <-ticker.C curSuccess := atomic.LoadUint64(&successTxn) curFail := atomic.LoadUint64(&failTxn) log.Printf("tps(success:%v 
fail:%v)\n", float64(curSuccess-lastSuccess)/10, float64(curFail-lastFail)/10) lastSuccess = curSuccess lastFail = curFail } } func checkLoop(db *sql.DB) { ctx := context.Background() conn, err := db.Conn(ctx) if err != nil { panic(err) } ticker := time.NewTicker(time.Second * 30) tableNames := make([]string, *tables) for i := 0; i < *tables; i++ { tableNames[i] = fmt.Sprintf("select c from t%d", i) } allSumStmt := fmt.Sprintf("select sum(c) from (%s) tall", strings.Join(tableNames, " union all ")) for { <-ticker.C checkCount(conn, allSumStmt, 0) for i := 0; i < *tables; i++ { checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (i)", i), int64(*tableSize)) checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (u)", i), int64(*tableSize)) } } } func checkCount(conn *sql.Conn, sql string, expected int64) { row := conn.QueryRowContext(context.Background(), sql) var c int64 if err := row.Scan(&c); err != nil { log.Println("check", sql, err) } else { if c != expected { panic(fmt.Sprintf("data inconsistency, %s is %d, expecte %d", sql, c, expected)) } } }
{ se, err := NewSession(db, uint64(i), *tableSize, numPartitions) if err != nil { log.Fatal(err) } go se.Run(wg) }
conditional_block
test_pessismistic_multi_tables.go
package main import ( "context" "database/sql" "errors" "flag" "fmt" "log" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" _ "github.com/go-sql-driver/mysql" "modernc.org/mathutil" ) var ( addr = flag.String("addr", "127.0.0.1:4000", "tidb address") concurrency = flag.Int("concurrency", 32, "concurrency") loadData = flag.Bool("load-data", false, "load data before run") tableSize = flag.Uint64("table-size", 400000, "table size") ignoreO = flag.String("ignore-o", "9007,1105", "ignored error code for optimistic transaction, separated by comma") ignoreP = flag.String("ignore-p", "1213", "ignored error code for pessimistic transaction, separated by comma") mode = flag.String("mode", "mix", "transaction mode, mix|pessimistic|optimistic") tables = flag.Int("tables", 4, "number of test tables") insertDelete = flag.Bool("insert-delete", false, "run insert delete transactions") ignoreCodesO []int ignoreCodesP []int successTxn uint64 failTxn uint64 ) const numPartitions = 4 func main() { flag.Parse() parts := strings.Split(*ignoreP, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesP = append(ignoreCodesP, iv) } parts = strings.Split(*ignoreO, ",") for _, part := range parts { iv, _ := strconv.Atoi(part) ignoreCodesO = append(ignoreCodesO, iv) } db, err := sql.Open("mysql", "root:@tcp("+*addr+")/test") if err != nil { log.Fatal(err) } if *loadData { err = LoadData(db, *tableSize) if err != nil { log.Fatal(err) } return } wg := new(sync.WaitGroup) wg.Add(*concurrency) for i := 0; i < *concurrency; i++ { se, err := NewSession(db, uint64(i), *tableSize, numPartitions) if err != nil { log.Fatal(err) } go se.Run(wg) } if !*insertDelete { go checkLoop(db) } go statsLoop() wg.Wait() } const batchSize = 100 func LoadData(db *sql.DB, maxSize uint64) error { for i := 0; i < *tables; i++ { tableName := fmt.Sprintf("t%d", i) log.Printf("loading table: %s\n", tableName) if _, err := db.Exec(fmt.Sprintf("drop table if exists %s", tableName)); err != nil { 
return nil } createTableStmt := fmt.Sprintf("create table %s ("+ "id bigint primary key,"+ "u bigint unsigned unique,"+ "i bigint, c bigint,"+ "index i (i))", tableName) if _, err := db.Exec(createTableStmt); err != nil { return err } for i := uint64(0); i < maxSize; i += batchSize { if _, err := db.Exec(insertSql(tableName, i)); err != nil { return err } } } return nil } func insertSql(tableName string, beginID uint64) string { var values []string for i := beginID; i < beginID+batchSize; i++ { value := fmt.Sprintf("(%d, %d, %d, %d)", i, i, i, 0) values = append(values, value) } return fmt.Sprintf("insert %s values %s", tableName, strings.Join(values, ",")) } type Session struct { seID uint64 isPessimistic bool conn *sql.Conn stmts []func(ctx context.Context) error ran *randIDGenerator addedCount int txnStart time.Time commitStart time.Time } func NewSession(db *sql.DB, seID, maxSize uint64, numPartitions uint64) (*Session, error) { ctx := context.Background() con, err := db.Conn(ctx) if err != nil { return nil, err } se := &Session{ seID: seID, conn: con, ran: newRandIDGenerator(maxSize, numPartitions), } switch *mode { case "pessimistic": se.isPessimistic = true case "mix": se.isPessimistic = se.seID%2 == 1 } se.stmts = append(se.stmts, se.updateIndex, se.updateIndexRange, se.updateRange, se.updateUniqueIndex, se.replace, se.deleteInsert, se.selectForUpdate, se.plainSelect, ) return se, nil } func (se *Session) runTransaction(parent context.Context) error { se.reset() ctx, cancel := context.WithTimeout(parent, time.Minute)
} _, err := se.conn.ExecContext(ctx, beginSQL) if err != nil { return err } numStmts := 1 + se.ran.uniform.Intn(5) for i := 0; i < numStmts; i++ { stmtType := se.ran.uniform.Intn(len(se.stmts)) f := se.stmts[stmtType] err = f(ctx) if err != nil { se.handleError(ctx, err, false) return nil } } se.commitStart = time.Now() _, err = se.conn.ExecContext(ctx, "commit") if err != nil { se.handleError(ctx, err, true) } else { atomic.AddUint64(&successTxn, 1) } return nil } func getErrorCode(err error) int { var code int _, err1 := fmt.Sscanf(err.Error(), "Error %d:", &code) if err1 != nil { return -1 } return code } func (se *Session) handleError(ctx context.Context, err error, isCommit bool) { atomic.AddUint64(&failTxn, 1) _, _ = se.conn.ExecContext(ctx, "rollback") code := getErrorCode(err) ignoreCodes := ignoreCodesO if se.isPessimistic { ignoreCodes = ignoreCodesP } for _, ignoreCode := range ignoreCodes { if ignoreCode == code { return } } txnMode := "optimistic" if se.isPessimistic { txnMode = "pessimistic" } if isCommit { log.Println(txnMode, "txnDur", time.Since(se.txnStart), "commitDur", time.Since(se.commitStart), err) } else { log.Println(txnMode, se.isPessimistic, "txnDur", time.Since(se.txnStart), err) } } func (se *Session) Run(wg *sync.WaitGroup) { defer wg.Done() ctx := context.Background() runFunc := se.runTransaction for { if err := runFunc(ctx); err != nil { log.Println("begin error", err) return } } } func (se *Session) deleteInsert(ctx context.Context) error { return nil // TODO: add deleteInsert for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //if err = se.executeDML(ctx, "delete from t where id = %d", rowID); err != nil { // return err //} //return se.executeDML(ctx, "insert t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) 
replace(ctx context.Context) error { return nil // TODO: add replace for multiple tables //rowID := se.ran.nextRowID() //row := se.conn.QueryRowContext(ctx, fmt.Sprintf("select c from t where id = %d for update", rowID)) //var cnt int64 //err := row.Scan(&cnt) //if err != nil { // return err //} //// When replace on existing records, the semantic is equal to `delete then insert`, so the cnt //return se.executeDML(ctx, "replace t values (%d, %d, %d, %d)", // rowID, se.ran.nextUniqueIndex(), rowID, cnt+2) } func (se *Session) updateSimple(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id = %d", toTableID, toRowID) } func (se *Session) updateIndex(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id = %d", fromTableID, fromRowID); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id = %d", toTableID, toRowID) } // updateUniqueIndex make sure there is no conflict on the unique index by randomly generate the unique index value func (se *Session) updateUniqueIndex(ctx context.Context) error { return se.executeDML(ctx, "update t%d set u = %d, c = c where id = %d", se.ran.nextTableID(), se.ran.nextUniqueIndex(), se.ran.nextRowID()) } func (se *Session) updateRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set c = c + 1 where id between %d and %d", toTableID, 
beginRowID, beginRowID+8) } func (se *Session) updateIndexRange(ctx context.Context) error { fromTableID, toTableID, fromRowID, toRowID := se.ran.nextTableAndRowPairs() beginRowID := mathutil.MinUint64(fromRowID, toRowID) + 2 if err := se.executeDML(ctx, "update t%d set i = i + 1, c = c - 1 where id between %d and %d", fromTableID, beginRowID, beginRowID+8); err != nil { return err } return se.executeDML(ctx, "update t%d set i = i + 1, c = c + 1 where id between %d and %d", toTableID, beginRowID, beginRowID+8) } func (se *Session) plainSelect(ctx context.Context) error { tableID := se.ran.nextTableID() beginRowID := se.ran.nextRowID() endRowID := beginRowID + 10 return se.executeSelect(ctx, "select * from t%d where id between %d and %d", tableID, beginRowID, endRowID) } func (se *Session) selectForUpdate(ctx context.Context) error { return se.executeSelect(ctx, "select * from t%d where id in (%d, %d) for update", se.ran.nextTableID(), se.ran.nextRowID(), se.ran.nextRowID()) } func (se *Session) executeDML(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) res, err := se.conn.ExecContext(ctx, sql) if err != nil { return err } affected, err := res.RowsAffected() if err != nil { return err } if affected == 0 { return errors.New("affected row is 0, " + sql) } se.addedCount += int(affected) return nil } func (se *Session) executeSelect(ctx context.Context, sqlFormat string, args ...interface{}) error { sql := fmt.Sprintf(sqlFormat, args...) 
rows, err := se.conn.QueryContext(ctx, sql) if err != nil { return err } for rows.Next() { } if rows.Err() != nil { return rows.Err() } return rows.Close() } func (se *Session) executeCommit(ctx context.Context) error { _, err := se.conn.ExecContext(ctx, "commit") return err } func (se *Session) reset() { se.ran.allocated = map[uint64]struct{}{} se.addedCount = 0 se.txnStart = time.Now() } // randIDGenerator generates random ID that combines round-robin and zipf distribution and make sure not duplicated. type randIDGenerator struct { allocated map[uint64]struct{} zipf *rand.Zipf uniform *rand.Rand tableNum int max uint64 partitionSize uint64 numPartitions uint64 partitionIdx uint64 } func (r *randIDGenerator) nextTableAndRowPairs() (fromTableID, toTableID int, fromRowID, toRowID uint64) { fromTableID = r.nextTableID() toTableID = r.nextTableID() fromRowID = r.nextRowID() toRowID = r.nextRowID() return } func (r *randIDGenerator) nextTableID() int { return r.uniform.Intn(r.tableNum) } func (r *randIDGenerator) nextRowID() uint64 { return r.uniform.Uint64() % *tableSize } func (r *randIDGenerator) reset() { r.allocated = map[uint64]struct{}{} } func (r *randIDGenerator) nextNumStatements(n int) int { return r.uniform.Intn(n) } func (r *randIDGenerator) nextUniqueIndex() uint64 { return r.uniform.Uint64() } func newRandIDGenerator(maxSize uint64, numPartitions uint64) *randIDGenerator { partitionSize := maxSize / numPartitions src := rand.NewSource(time.Now().UnixNano()) ran := rand.New(src) zipf := rand.NewZipf(ran, 1.01, 1, partitionSize-1) return &randIDGenerator{ allocated: map[uint64]struct{}{}, zipf: zipf, uniform: ran, max: maxSize, tableNum: *tables, partitionSize: maxSize / numPartitions, numPartitions: numPartitions, } } func statsLoop() { ticker := time.NewTicker(time.Second * 10) lastSuccess := uint64(0) lastFail := uint64(0) for { <-ticker.C curSuccess := atomic.LoadUint64(&successTxn) curFail := atomic.LoadUint64(&failTxn) log.Printf("tps(success:%v 
fail:%v)\n", float64(curSuccess-lastSuccess)/10, float64(curFail-lastFail)/10) lastSuccess = curSuccess lastFail = curFail } } func checkLoop(db *sql.DB) { ctx := context.Background() conn, err := db.Conn(ctx) if err != nil { panic(err) } ticker := time.NewTicker(time.Second * 30) tableNames := make([]string, *tables) for i := 0; i < *tables; i++ { tableNames[i] = fmt.Sprintf("select c from t%d", i) } allSumStmt := fmt.Sprintf("select sum(c) from (%s) tall", strings.Join(tableNames, " union all ")) for { <-ticker.C checkCount(conn, allSumStmt, 0) for i := 0; i < *tables; i++ { checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (i)", i), int64(*tableSize)) checkCount(conn, fmt.Sprintf("select count(*) from t%d use index (u)", i), int64(*tableSize)) } } } func checkCount(conn *sql.Conn, sql string, expected int64) { row := conn.QueryRowContext(context.Background(), sql) var c int64 if err := row.Scan(&c); err != nil { log.Println("check", sql, err) } else { if c != expected { panic(fmt.Sprintf("data inconsistency, %s is %d, expecte %d", sql, c, expected)) } } }
defer cancel() beginSQL := "begin /*!90000 optimistic */" if se.isPessimistic { beginSQL = "begin /*!90000 pessimistic */"
random_line_split
backbone2.py
import numpy as np import fvcore.nn.weight_init as weight_init import torch.nn.functional as F from torch import nn import torch from torch.nn.modules.utils import _pair from detectron2.layers import CNNBlockBase,ShapeSpec,Conv2d,get_norm,FrozenBatchNorm2d from detectron2.layers.deform_conv import deform_conv from detectron2.modeling import Backbone,BACKBONE_REGISTRY cfgs = { 11: [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 13: [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 16: [64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M"], 19: [64,64,"M",128,128,"M",256,256,256,256,"M",512,512,512,512,"M",512,512,512,512,"M"], } class Conv2d2(Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def freeze(self): """ Make this block not trainable. This method sets all parameters to `requires_grad=False`, and convert all BatchNorm layers to FrozenBatchNorm Returns: the block itself """ for p in self.parameters(): p.requires_grad = False FrozenBatchNorm2d.convert_frozen_batchnorm(self) return self class DeformConv2(CNNBlockBase): def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True, norm=None, activation=None, ):
def forward(self, x): offset_buffer=self.buffer_offset(x) offset = self.conv2_offset(offset_buffer) out = deform_conv( x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups, ) if self.bias is not None: bias=self.bias.view(1,-1,1,1) out+=bias if self.norm is not None: out = self.norm(out) if self.activation is not None: out = self.activation(out) return x,offset def extra_repr(self): tmpstr = "in_channels=" + str(self.in_channels) tmpstr += ", out_channels=" + str(self.out_channels) tmpstr += ", kernel_size=" + str(self.kernel_size) tmpstr += ", stride=" + str(self.stride) tmpstr += ", padding=" + str(self.padding) tmpstr += ", dilation=" + str(self.dilation) tmpstr += ", groups=" + str(self.groups) tmpstr += ", deformable_groups=" + str(self.deformable_groups) tmpstr += ", bias="+str(True) return tmpstr class VGG(Backbone): def __init__(self, stages, num_classes=None, out_features=None): """ """ super().__init__() self.num_classes = num_classes current_stride = 1 self._out_feature_strides = {} self._out_feature_channels = {} self.stages_and_names = [] for i, block in enumerate(stages): name = "res" + str(i + 1) stage = nn.Sequential(*block) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * 2 ) self._out_feature_channels[name] = block[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) for m in self.classifier.modules(): if isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0.01) # nn.init.constant_(m.bias, 0) name = "classifier" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in 
self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) def forward(self, x): outputs = {} for stage, name in self.stages_and_names: # convert ccp to pcc for x=F.avg_pool2d(x,kernel_size=2, stride=2) x = stage(x) if name in self._out_features: outputs[name] = x if isinstance(x,tuple): x,_=x if self.num_classes is not None: x = self.avgpool(x) x = self.classifier(x) if "classifer" in self._out_features: outputs["classifer"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): for idx, (stage, _) in enumerate(self.stages_and_names, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self @property def size_divisibility(self): return 32 @staticmethod def make_stage(block_class, in_channels, out_channels,norm): blocks = [] num_blocks=len(block_class) for i in range(num_blocks): blocks.append( block_class[i]( in_channels=in_channels[i], out_channels=out_channels[i], kernel_size=3, stride=1, padding=1, bias=True, norm=get_norm(norm, out_channels[i]), activation=nn.ReLU(True) ) ) return blocks @BACKBONE_REGISTRY.register() def build_points_collection_vgg_backbone(cfg, input_shape): # fmt: off depth = cfg.MODEL.VGG.DEPTH freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT norm = cfg.MODEL.VGG.NORM out_features = cfg.MODEL.VGG.OUT_FEATURES in_channels = input_shape.channels deform_on_per_stage = cfg.MODEL.VGG.DEFORM_ON_PER_STAGE # fmt: on stages = [] out_stage_idx = [ {"res1": 1, "res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features ] max_stage_idx = max(out_stage_idx) stage_inds = [i for i, x in enumerate(cfgs[depth]) if x == "M"] ind = 0 for idx, stage_idx in enumerate(range(1, max_stage_idx + 1)): channel_cfg=cfgs[depth][ind : stage_inds[idx]] if deform_on_per_stage[idx]: block_class=[Conv2d2 for i in 
range(len(channel_cfg)-1)]+[DeformConv2] else: block_class=[Conv2d2 for i in range(len(channel_cfg))] stage_kargs = { "block_class": block_class, "in_channels": [in_channels]+channel_cfg[:-1], "out_channels": channel_cfg, "norm": norm, } blocks = VGG.make_stage(**stage_kargs) out_channels = channel_cfg[-1] in_channels = out_channels ind = stage_inds[idx] + 1 stages.append(blocks) return VGG(stages, out_features=out_features).freeze(freeze_at)
""" Deformable convolution from :paper:`deformconv`. Arguments are similar to :class:`Conv2D`. Extra arguments: Args: deformable_groups (int): number of groups used in deformable convolution. norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function """ super(DeformConv2, self).__init__(in_channels,out_channels,stride) assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( in_channels, groups ) assert ( out_channels % groups == 0 ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.deformable_groups = deformable_groups self.norm = norm self.activation = activation self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) ) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.bias = None nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") if self.bias is not None: nn.init.constant_(self.bias, 0) bottleneck_channels=in_channels//2 offset_channels=2*kernel_size**2 self.buffer_offset=nn.Sequential( Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, dilation=1, ), nn.ReLU(inplace=True), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(5,1), stride=stride, padding=(2*dilation,0), dilation=dilation, ), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(1,5), stride=stride, padding=(0,2*dilation), dilation=dilation, ), nn.ReLU(inplace=True) ) self.conv2_offset = Conv2d( bottleneck_channels, offset_channels * deformable_groups, kernel_size=3, stride=stride, padding=1 * dilation, dilation=dilation, ) nn.init.constant_(self.conv2_offset.weight, 0) nn.init.constant_(self.conv2_offset.bias, 0)
identifier_body
backbone2.py
import numpy as np import fvcore.nn.weight_init as weight_init import torch.nn.functional as F from torch import nn import torch from torch.nn.modules.utils import _pair from detectron2.layers import CNNBlockBase,ShapeSpec,Conv2d,get_norm,FrozenBatchNorm2d from detectron2.layers.deform_conv import deform_conv from detectron2.modeling import Backbone,BACKBONE_REGISTRY cfgs = { 11: [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 13: [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 16: [64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M"], 19: [64,64,"M",128,128,"M",256,256,256,256,"M",512,512,512,512,"M",512,512,512,512,"M"], } class Conv2d2(Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def freeze(self): """ Make this block not trainable. This method sets all parameters to `requires_grad=False`, and convert all BatchNorm layers to FrozenBatchNorm Returns: the block itself """ for p in self.parameters(): p.requires_grad = False FrozenBatchNorm2d.convert_frozen_batchnorm(self) return self class DeformConv2(CNNBlockBase): def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True, norm=None, activation=None, ): """ Deformable convolution from :paper:`deformconv`. Arguments are similar to :class:`Conv2D`. Extra arguments: Args: deformable_groups (int): number of groups used in deformable convolution. 
norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function """ super(DeformConv2, self).__init__(in_channels,out_channels,stride) assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( in_channels, groups ) assert ( out_channels % groups == 0 ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.deformable_groups = deformable_groups self.norm = norm self.activation = activation self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) ) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.bias = None nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") if self.bias is not None: nn.init.constant_(self.bias, 0) bottleneck_channels=in_channels//2 offset_channels=2*kernel_size**2 self.buffer_offset=nn.Sequential( Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, dilation=1, ), nn.ReLU(inplace=True), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(5,1), stride=stride, padding=(2*dilation,0), dilation=dilation, ), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(1,5), stride=stride, padding=(0,2*dilation), dilation=dilation, ), nn.ReLU(inplace=True) ) self.conv2_offset = Conv2d( bottleneck_channels, offset_channels * deformable_groups, kernel_size=3, stride=stride, padding=1 * dilation, dilation=dilation, ) nn.init.constant_(self.conv2_offset.weight, 0) nn.init.constant_(self.conv2_offset.bias, 0) def forward(self, x): offset_buffer=self.buffer_offset(x) offset = self.conv2_offset(offset_buffer) out = deform_conv( x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, 
self.deformable_groups, ) if self.bias is not None: bias=self.bias.view(1,-1,1,1) out+=bias if self.norm is not None: out = self.norm(out) if self.activation is not None: out = self.activation(out) return x,offset def extra_repr(self): tmpstr = "in_channels=" + str(self.in_channels) tmpstr += ", out_channels=" + str(self.out_channels) tmpstr += ", kernel_size=" + str(self.kernel_size) tmpstr += ", stride=" + str(self.stride) tmpstr += ", padding=" + str(self.padding) tmpstr += ", dilation=" + str(self.dilation) tmpstr += ", groups=" + str(self.groups) tmpstr += ", deformable_groups=" + str(self.deformable_groups) tmpstr += ", bias="+str(True) return tmpstr class VGG(Backbone): def __init__(self, stages, num_classes=None, out_features=None): """ """ super().__init__() self.num_classes = num_classes current_stride = 1 self._out_feature_strides = {} self._out_feature_channels = {} self.stages_and_names = [] for i, block in enumerate(stages): name = "res" + str(i + 1) stage = nn.Sequential(*block) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * 2 ) self._out_feature_channels[name] = block[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) for m in self.classifier.modules(): if isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0.01) # nn.init.constant_(m.bias, 0) name = "classifier" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) def forward(self, x): outputs = {} for stage, name in self.stages_and_names: # convert ccp to pcc for 
x=F.avg_pool2d(x,kernel_size=2, stride=2) x = stage(x) if name in self._out_features:
if isinstance(x,tuple): x,_=x if self.num_classes is not None: x = self.avgpool(x) x = self.classifier(x) if "classifer" in self._out_features: outputs["classifer"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): for idx, (stage, _) in enumerate(self.stages_and_names, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self @property def size_divisibility(self): return 32 @staticmethod def make_stage(block_class, in_channels, out_channels,norm): blocks = [] num_blocks=len(block_class) for i in range(num_blocks): blocks.append( block_class[i]( in_channels=in_channels[i], out_channels=out_channels[i], kernel_size=3, stride=1, padding=1, bias=True, norm=get_norm(norm, out_channels[i]), activation=nn.ReLU(True) ) ) return blocks @BACKBONE_REGISTRY.register() def build_points_collection_vgg_backbone(cfg, input_shape): # fmt: off depth = cfg.MODEL.VGG.DEPTH freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT norm = cfg.MODEL.VGG.NORM out_features = cfg.MODEL.VGG.OUT_FEATURES in_channels = input_shape.channels deform_on_per_stage = cfg.MODEL.VGG.DEFORM_ON_PER_STAGE # fmt: on stages = [] out_stage_idx = [ {"res1": 1, "res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features ] max_stage_idx = max(out_stage_idx) stage_inds = [i for i, x in enumerate(cfgs[depth]) if x == "M"] ind = 0 for idx, stage_idx in enumerate(range(1, max_stage_idx + 1)): channel_cfg=cfgs[depth][ind : stage_inds[idx]] if deform_on_per_stage[idx]: block_class=[Conv2d2 for i in range(len(channel_cfg)-1)]+[DeformConv2] else: block_class=[Conv2d2 for i in range(len(channel_cfg))] stage_kargs = { "block_class": block_class, "in_channels": [in_channels]+channel_cfg[:-1], "out_channels": channel_cfg, "norm": norm, } blocks = VGG.make_stage(**stage_kargs) out_channels = channel_cfg[-1] in_channels = out_channels 
ind = stage_inds[idx] + 1 stages.append(blocks) return VGG(stages, out_features=out_features).freeze(freeze_at)
outputs[name] = x
conditional_block
backbone2.py
import numpy as np import fvcore.nn.weight_init as weight_init import torch.nn.functional as F from torch import nn import torch from torch.nn.modules.utils import _pair from detectron2.layers import CNNBlockBase,ShapeSpec,Conv2d,get_norm,FrozenBatchNorm2d from detectron2.layers.deform_conv import deform_conv from detectron2.modeling import Backbone,BACKBONE_REGISTRY cfgs = { 11: [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 13: [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 16: [64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M"], 19: [64,64,"M",128,128,"M",256,256,256,256,"M",512,512,512,512,"M",512,512,512,512,"M"], } class Conv2d2(Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def freeze(self): """ Make this block not trainable. This method sets all parameters to `requires_grad=False`, and convert all BatchNorm layers to FrozenBatchNorm Returns: the block itself """ for p in self.parameters(): p.requires_grad = False FrozenBatchNorm2d.convert_frozen_batchnorm(self) return self class DeformConv2(CNNBlockBase): def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True, norm=None, activation=None, ): """ Deformable convolution from :paper:`deformconv`. Arguments are similar to :class:`Conv2D`. Extra arguments: Args: deformable_groups (int): number of groups used in deformable convolution. 
norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function """ super(DeformConv2, self).__init__(in_channels,out_channels,stride) assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( in_channels, groups ) assert ( out_channels % groups == 0 ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.deformable_groups = deformable_groups self.norm = norm self.activation = activation self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) ) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.bias = None nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") if self.bias is not None: nn.init.constant_(self.bias, 0) bottleneck_channels=in_channels//2 offset_channels=2*kernel_size**2 self.buffer_offset=nn.Sequential( Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, dilation=1, ), nn.ReLU(inplace=True), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(5,1), stride=stride, padding=(2*dilation,0), dilation=dilation, ), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(1,5), stride=stride, padding=(0,2*dilation), dilation=dilation, ), nn.ReLU(inplace=True) ) self.conv2_offset = Conv2d( bottleneck_channels, offset_channels * deformable_groups, kernel_size=3, stride=stride, padding=1 * dilation, dilation=dilation, ) nn.init.constant_(self.conv2_offset.weight, 0) nn.init.constant_(self.conv2_offset.bias, 0) def forward(self, x): offset_buffer=self.buffer_offset(x) offset = self.conv2_offset(offset_buffer) out = deform_conv( x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, 
self.deformable_groups, ) if self.bias is not None: bias=self.bias.view(1,-1,1,1) out+=bias if self.norm is not None: out = self.norm(out) if self.activation is not None: out = self.activation(out) return x,offset def
(self): tmpstr = "in_channels=" + str(self.in_channels) tmpstr += ", out_channels=" + str(self.out_channels) tmpstr += ", kernel_size=" + str(self.kernel_size) tmpstr += ", stride=" + str(self.stride) tmpstr += ", padding=" + str(self.padding) tmpstr += ", dilation=" + str(self.dilation) tmpstr += ", groups=" + str(self.groups) tmpstr += ", deformable_groups=" + str(self.deformable_groups) tmpstr += ", bias="+str(True) return tmpstr class VGG(Backbone): def __init__(self, stages, num_classes=None, out_features=None): """ """ super().__init__() self.num_classes = num_classes current_stride = 1 self._out_feature_strides = {} self._out_feature_channels = {} self.stages_and_names = [] for i, block in enumerate(stages): name = "res" + str(i + 1) stage = nn.Sequential(*block) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * 2 ) self._out_feature_channels[name] = block[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) for m in self.classifier.modules(): if isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0.01) # nn.init.constant_(m.bias, 0) name = "classifier" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) def forward(self, x): outputs = {} for stage, name in self.stages_and_names: # convert ccp to pcc for x=F.avg_pool2d(x,kernel_size=2, stride=2) x = stage(x) if name in self._out_features: outputs[name] = x if isinstance(x,tuple): x,_=x if self.num_classes is not None: x = self.avgpool(x) x = self.classifier(x) if "classifer" in 
self._out_features: outputs["classifer"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): for idx, (stage, _) in enumerate(self.stages_and_names, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self @property def size_divisibility(self): return 32 @staticmethod def make_stage(block_class, in_channels, out_channels,norm): blocks = [] num_blocks=len(block_class) for i in range(num_blocks): blocks.append( block_class[i]( in_channels=in_channels[i], out_channels=out_channels[i], kernel_size=3, stride=1, padding=1, bias=True, norm=get_norm(norm, out_channels[i]), activation=nn.ReLU(True) ) ) return blocks @BACKBONE_REGISTRY.register() def build_points_collection_vgg_backbone(cfg, input_shape): # fmt: off depth = cfg.MODEL.VGG.DEPTH freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT norm = cfg.MODEL.VGG.NORM out_features = cfg.MODEL.VGG.OUT_FEATURES in_channels = input_shape.channels deform_on_per_stage = cfg.MODEL.VGG.DEFORM_ON_PER_STAGE # fmt: on stages = [] out_stage_idx = [ {"res1": 1, "res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features ] max_stage_idx = max(out_stage_idx) stage_inds = [i for i, x in enumerate(cfgs[depth]) if x == "M"] ind = 0 for idx, stage_idx in enumerate(range(1, max_stage_idx + 1)): channel_cfg=cfgs[depth][ind : stage_inds[idx]] if deform_on_per_stage[idx]: block_class=[Conv2d2 for i in range(len(channel_cfg)-1)]+[DeformConv2] else: block_class=[Conv2d2 for i in range(len(channel_cfg))] stage_kargs = { "block_class": block_class, "in_channels": [in_channels]+channel_cfg[:-1], "out_channels": channel_cfg, "norm": norm, } blocks = VGG.make_stage(**stage_kargs) out_channels = channel_cfg[-1] in_channels = out_channels ind = stage_inds[idx] + 1 stages.append(blocks) return VGG(stages, out_features=out_features).freeze(freeze_at)
extra_repr
identifier_name
backbone2.py
import numpy as np import fvcore.nn.weight_init as weight_init import torch.nn.functional as F from torch import nn import torch from torch.nn.modules.utils import _pair from detectron2.layers import CNNBlockBase,ShapeSpec,Conv2d,get_norm,FrozenBatchNorm2d from detectron2.layers.deform_conv import deform_conv from detectron2.modeling import Backbone,BACKBONE_REGISTRY cfgs = { 11: [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 13: [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"], 16: [64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M"], 19: [64,64,"M",128,128,"M",256,256,256,256,"M",512,512,512,512,"M",512,512,512,512,"M"], } class Conv2d2(Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def freeze(self): """ Make this block not trainable. This method sets all parameters to `requires_grad=False`, and convert all BatchNorm layers to FrozenBatchNorm Returns: the block itself """ for p in self.parameters(): p.requires_grad = False FrozenBatchNorm2d.convert_frozen_batchnorm(self) return self class DeformConv2(CNNBlockBase): def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True, norm=None, activation=None, ): """ Deformable convolution from :paper:`deformconv`. Arguments are similar to :class:`Conv2D`. Extra arguments: Args: deformable_groups (int): number of groups used in deformable convolution. 
norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function """ super(DeformConv2, self).__init__(in_channels,out_channels,stride) assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( in_channels, groups ) assert ( out_channels % groups == 0 ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size)
self.deformable_groups = deformable_groups self.norm = norm self.activation = activation self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) ) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.bias = None nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") if self.bias is not None: nn.init.constant_(self.bias, 0) bottleneck_channels=in_channels//2 offset_channels=2*kernel_size**2 self.buffer_offset=nn.Sequential( Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, dilation=1, ), nn.ReLU(inplace=True), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(5,1), stride=stride, padding=(2*dilation,0), dilation=dilation, ), Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=(1,5), stride=stride, padding=(0,2*dilation), dilation=dilation, ), nn.ReLU(inplace=True) ) self.conv2_offset = Conv2d( bottleneck_channels, offset_channels * deformable_groups, kernel_size=3, stride=stride, padding=1 * dilation, dilation=dilation, ) nn.init.constant_(self.conv2_offset.weight, 0) nn.init.constant_(self.conv2_offset.bias, 0) def forward(self, x): offset_buffer=self.buffer_offset(x) offset = self.conv2_offset(offset_buffer) out = deform_conv( x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups, ) if self.bias is not None: bias=self.bias.view(1,-1,1,1) out+=bias if self.norm is not None: out = self.norm(out) if self.activation is not None: out = self.activation(out) return x,offset def extra_repr(self): tmpstr = "in_channels=" + str(self.in_channels) tmpstr += ", out_channels=" + str(self.out_channels) tmpstr += ", kernel_size=" + str(self.kernel_size) tmpstr += ", stride=" + str(self.stride) tmpstr += ", padding=" + str(self.padding) tmpstr += ", dilation=" + str(self.dilation) tmpstr += ", groups=" + str(self.groups) tmpstr += ", deformable_groups=" + str(self.deformable_groups) tmpstr += ", bias="+str(True) 
return tmpstr class VGG(Backbone): def __init__(self, stages, num_classes=None, out_features=None): """ """ super().__init__() self.num_classes = num_classes current_stride = 1 self._out_feature_strides = {} self._out_feature_channels = {} self.stages_and_names = [] for i, block in enumerate(stages): name = "res" + str(i + 1) stage = nn.Sequential(*block) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * 2 ) self._out_feature_channels[name] = block[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) for m in self.classifier.modules(): if isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0.01) # nn.init.constant_(m.bias, 0) name = "classifier" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) def forward(self, x): outputs = {} for stage, name in self.stages_and_names: # convert ccp to pcc for x=F.avg_pool2d(x,kernel_size=2, stride=2) x = stage(x) if name in self._out_features: outputs[name] = x if isinstance(x,tuple): x,_=x if self.num_classes is not None: x = self.avgpool(x) x = self.classifier(x) if "classifer" in self._out_features: outputs["classifer"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): for idx, (stage, _) in enumerate(self.stages_and_names, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self @property def 
size_divisibility(self): return 32 @staticmethod def make_stage(block_class, in_channels, out_channels,norm): blocks = [] num_blocks=len(block_class) for i in range(num_blocks): blocks.append( block_class[i]( in_channels=in_channels[i], out_channels=out_channels[i], kernel_size=3, stride=1, padding=1, bias=True, norm=get_norm(norm, out_channels[i]), activation=nn.ReLU(True) ) ) return blocks @BACKBONE_REGISTRY.register() def build_points_collection_vgg_backbone(cfg, input_shape): # fmt: off depth = cfg.MODEL.VGG.DEPTH freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT norm = cfg.MODEL.VGG.NORM out_features = cfg.MODEL.VGG.OUT_FEATURES in_channels = input_shape.channels deform_on_per_stage = cfg.MODEL.VGG.DEFORM_ON_PER_STAGE # fmt: on stages = [] out_stage_idx = [ {"res1": 1, "res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features ] max_stage_idx = max(out_stage_idx) stage_inds = [i for i, x in enumerate(cfgs[depth]) if x == "M"] ind = 0 for idx, stage_idx in enumerate(range(1, max_stage_idx + 1)): channel_cfg=cfgs[depth][ind : stage_inds[idx]] if deform_on_per_stage[idx]: block_class=[Conv2d2 for i in range(len(channel_cfg)-1)]+[DeformConv2] else: block_class=[Conv2d2 for i in range(len(channel_cfg))] stage_kargs = { "block_class": block_class, "in_channels": [in_channels]+channel_cfg[:-1], "out_channels": channel_cfg, "norm": norm, } blocks = VGG.make_stage(**stage_kargs) out_channels = channel_cfg[-1] in_channels = out_channels ind = stage_inds[idx] + 1 stages.append(blocks) return VGG(stages, out_features=out_features).freeze(freeze_at)
self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups
random_line_split
transactions_reader.rs
/// Reads transactions from a CSV file /// Make it a separate file in case we want to add new methods /// such as reading from a non-CSV file and so on use std::{ collections::HashMap, io::{BufRead, BufReader}, path::Path, }; use anyhow::Context; use crossbeam_channel::{Receiver, Sender}; use csv::{ByteRecord, ReaderBuilder, Trim}; use crate::records::TransactionRecord; use log::*; /// A type that represents a stream of transactions arriving into the system /// Many channels (such as crossbeam) implement iterator interface, so can be used for multithreading pub type TransactionsStream = Box<dyn Iterator<Item = TransactionRecord>>; /// Trait to read CSV files into a `TransactionsStream` pub trait TransactionCSVReader { /// Read transactions from a CSV file /// Returns a vector with all the transactions nicely packet into structs fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream>; } /// A single threaded bulk reader /// Reads and parses everything upfront and returns a stream to the records pub struct STBulkReader {} impl STBulkReader { pub fn new() -> Self { Self {} } } impl TransactionCSVReader for STBulkReader { fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream> { let start_time = std::time::Instant::now(); info!("STBulkReader reading the transactions"); let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .flexible(true) .from_path(path)?; // Read as byte records, that should improve the performance without a lot of reallocations let mut raw_record = csv::ByteRecord::new(); let headers = csv_reader.byte_headers()?.clone(); let mut transactions = Vec::new(); while csv_reader.read_byte_record(&mut raw_record)? { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); // for simplicity, ignore transactions that cannot be parsed if let Ok(record) = record { transactions.push(record); } } info!( "Read {} records in {:?}. 
Throughput: {} millions/second", transactions.len(), start_time.elapsed(), transactions.len() as f32 / (1000000.0 * start_time.elapsed().as_secs_f32()) ); Ok(Box::new(transactions.into_iter())) } } /// A multithreaded reader /// Reads blocks of raw bytes from a file (sequentially) /// And then forwards those blocks to a thread pool for deserialization pub struct MTReader { num_threads: usize, block_size: usize, } impl MTReader { pub fn new() -> Self { Self { num_threads: num_cpus::get(), block_size: 32 * 1024, } } pub fn with_threads(mut self, num_threads: usize) -> Self { self.num_threads = num_threads; self } #[allow(dead_code)] pub fn block_size(mut self, block_size: usize) -> Self { self.block_size = block_size; self } } impl TransactionCSVReader for MTReader { fn read_csv<P: AsRef<Path>>(mut self, path: P) -> anyhow::Result<TransactionsStream> { let mut file_reader = BufReader::with_capacity(2 * self.block_size, std::fs::File::open(path)?); let mut headers = vec![]; // read first row file_reader .read_until(b'\n', &mut headers) .with_context(|| "Failed to read the headers")?; let (parsed_tx, parsed_rx) = crossbeam_channel::bounded::<(u32, Vec<TransactionRecord>)>(1000); let (reorder_tx, reorder_rx) = crossbeam_channel::bounded::<TransactionRecord>(100000); let (block_tx, block_rx) = crossbeam_channel::bounded::<(u32, Vec<u8>)>(1000); Self::start_reorder(parsed_rx, reorder_tx); Self::start_dispatcher(self.num_threads, parsed_tx, block_rx); // Read blocks of transactions let _ = std::thread::spawn(move || { let mut block_id = 0; while let Some(block) = self.read_block(&mut file_reader) { block_id += 1; // send them to the thread pool dispatcher if block_tx.send((block_id, block)).is_err() { break; } // the parsed blocks may arrive out of order, so we need to perform a reordering } }); Ok(Box::new(reorder_rx.into_iter())) } } impl MTReader { /// Dispatch a CSV raw block for parsing fn start_dispatcher( num_threads: usize, parsed_tx: Sender<(u32, 
Vec<TransactionRecord>)>, block_rx: Receiver<(u32, Vec<u8>)>, )
/// Reorders transaction blocks from different thread /// So in the end everything is chronologically in order fn start_reorder( parsed_rx: Receiver<(u32, Vec<TransactionRecord>)>, reorder_tx: Sender<TransactionRecord>, ) { // Ignore the join handle, since the lifetime of the thread is tied to the lifetime of the input and output channels let _ = std::thread::spawn(move || { let mut waiting_for = 1; let mut queue = HashMap::new(); while let Ok(block) = parsed_rx.recv() { if block.0 == waiting_for { for record in block.1 { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; // Clear backlog while let Some(transactions) = queue.remove(&waiting_for) { for record in transactions { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; } } else if block.0 > waiting_for { queue.insert(block.0, block.1); } } }); } // Reads a big block until new line alignment fn read_block(&mut self, reader: &mut impl BufRead) -> Option<Vec<u8>> { let mut block = vec![0; self.block_size]; // put additional for adjustments block.reserve(1000); match reader.read(&mut block) { Ok(0) => None, Ok(n) => { block.truncate(n); // do not care if we reach EOF for now let _ = reader.read_until(b'\n', &mut block); Some(block) } Err(_) => None, } } } #[cfg(test)] mod tests { use rust_decimal_macros::dec; use super::*; use crate::records::TransactionType; #[test] fn test_no_file_exists() { let transactions = STBulkReader::new().read_csv("tests/data/non_existent.csv"); assert!(transactions.is_err()); } fn test_transaction_reader(reader: impl TransactionCSVReader, path: &str) { let mut transactions = reader.read_csv(&path).expect("Test file is not found"); // Validate a few fields to give us enough confidence that parsing is successful let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Deposit); let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Withdrawal); assert_eq!(trans.client, 6); assert_eq!(trans.tx, 
5); assert_eq!(trans.amount, Some(dec!(9.0))); let trans = transactions.skip(2).next().unwrap(); assert_eq!(trans.tr_type, TransactionType::ChargeBack); assert_eq!(trans.amount, None); } /// Tests that we can read and parse all transactions #[test] fn test_st_bulk_transaction_reader_serde() { let reader = STBulkReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_serde() { let reader = MTReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_big() { let reader = MTReader::new(); let mut transactions = reader .read_csv("tests/data/test_mt_reader.csv") .expect("Test file is not found"); for i in 1..20001 { assert_eq!(transactions.next().unwrap().tx, i); } assert!(transactions.next().is_none()); } }
{ for _ in 0..num_threads { let block_rx = block_rx.clone(); let parsed_tx = parsed_tx.clone(); // For now consider that the headers if read then they're OK and equal to below let headers = ByteRecord::from(vec!["type", "client", "tx", "amount"]); std::thread::spawn(move || { while let Ok((block_id, block)) = block_rx.recv() { let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .has_headers(true) .flexible(true) .from_reader(block.as_slice()); let mut raw_record = csv::ByteRecord::new(); // Looks like I have found a bug in CSV library // It doesn't trim the first row if has_headers = false and the headers are supplied to deserialize // I'll open a bug on github csv_reader.set_byte_headers(headers.clone()); let mut transactions = Vec::new(); while let Ok(true) = csv_reader.read_byte_record(&mut raw_record) { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); if let Ok(record) = record { transactions.push(record); } } // Will ignore the channel closed for now let _ = parsed_tx.send((block_id, transactions)); } }); } }
identifier_body
transactions_reader.rs
/// Reads transactions from a CSV file /// Make it a separate file in case we want to add new methods /// such as reading from a non-CSV file and so on use std::{ collections::HashMap, io::{BufRead, BufReader}, path::Path, }; use anyhow::Context; use crossbeam_channel::{Receiver, Sender}; use csv::{ByteRecord, ReaderBuilder, Trim}; use crate::records::TransactionRecord; use log::*; /// A type that represents a stream of transactions arriving into the system /// Many channels (such as crossbeam) implement iterator interface, so can be used for multithreading pub type TransactionsStream = Box<dyn Iterator<Item = TransactionRecord>>; /// Trait to read CSV files into a `TransactionsStream` pub trait TransactionCSVReader { /// Read transactions from a CSV file /// Returns a vector with all the transactions nicely packet into structs fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream>; } /// A single threaded bulk reader /// Reads and parses everything upfront and returns a stream to the records pub struct STBulkReader {} impl STBulkReader { pub fn new() -> Self { Self {} } } impl TransactionCSVReader for STBulkReader { fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream> { let start_time = std::time::Instant::now(); info!("STBulkReader reading the transactions"); let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .flexible(true) .from_path(path)?; // Read as byte records, that should improve the performance without a lot of reallocations let mut raw_record = csv::ByteRecord::new(); let headers = csv_reader.byte_headers()?.clone(); let mut transactions = Vec::new(); while csv_reader.read_byte_record(&mut raw_record)? { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); // for simplicity, ignore transactions that cannot be parsed if let Ok(record) = record { transactions.push(record); } } info!( "Read {} records in {:?}. 
Throughput: {} millions/second", transactions.len(), start_time.elapsed(), transactions.len() as f32 / (1000000.0 * start_time.elapsed().as_secs_f32()) ); Ok(Box::new(transactions.into_iter())) } } /// A multithreaded reader /// Reads blocks of raw bytes from a file (sequentially) /// And then forwards those blocks to a thread pool for deserialization pub struct MTReader { num_threads: usize, block_size: usize, } impl MTReader { pub fn new() -> Self { Self { num_threads: num_cpus::get(), block_size: 32 * 1024, } } pub fn with_threads(mut self, num_threads: usize) -> Self { self.num_threads = num_threads; self } #[allow(dead_code)] pub fn block_size(mut self, block_size: usize) -> Self { self.block_size = block_size; self } } impl TransactionCSVReader for MTReader { fn read_csv<P: AsRef<Path>>(mut self, path: P) -> anyhow::Result<TransactionsStream> { let mut file_reader = BufReader::with_capacity(2 * self.block_size, std::fs::File::open(path)?); let mut headers = vec![]; // read first row file_reader .read_until(b'\n', &mut headers) .with_context(|| "Failed to read the headers")?; let (parsed_tx, parsed_rx) = crossbeam_channel::bounded::<(u32, Vec<TransactionRecord>)>(1000); let (reorder_tx, reorder_rx) = crossbeam_channel::bounded::<TransactionRecord>(100000); let (block_tx, block_rx) = crossbeam_channel::bounded::<(u32, Vec<u8>)>(1000); Self::start_reorder(parsed_rx, reorder_tx); Self::start_dispatcher(self.num_threads, parsed_tx, block_rx); // Read blocks of transactions let _ = std::thread::spawn(move || { let mut block_id = 0; while let Some(block) = self.read_block(&mut file_reader) { block_id += 1; // send them to the thread pool dispatcher if block_tx.send((block_id, block)).is_err() { break; } // the parsed blocks may arrive out of order, so we need to perform a reordering } }); Ok(Box::new(reorder_rx.into_iter())) } } impl MTReader { /// Dispatch a CSV raw block for parsing fn start_dispatcher( num_threads: usize, parsed_tx: Sender<(u32, 
Vec<TransactionRecord>)>, block_rx: Receiver<(u32, Vec<u8>)>, ) { for _ in 0..num_threads { let block_rx = block_rx.clone(); let parsed_tx = parsed_tx.clone(); // For now consider that the headers if read then they're OK and equal to below let headers = ByteRecord::from(vec!["type", "client", "tx", "amount"]); std::thread::spawn(move || { while let Ok((block_id, block)) = block_rx.recv() { let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .has_headers(true) .flexible(true) .from_reader(block.as_slice()); let mut raw_record = csv::ByteRecord::new(); // Looks like I have found a bug in CSV library // It doesn't trim the first row if has_headers = false and the headers are supplied to deserialize // I'll open a bug on github csv_reader.set_byte_headers(headers.clone()); let mut transactions = Vec::new(); while let Ok(true) = csv_reader.read_byte_record(&mut raw_record) { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); if let Ok(record) = record { transactions.push(record); } } // Will ignore the channel closed for now let _ = parsed_tx.send((block_id, transactions)); } }); } } /// Reorders transaction blocks from different thread /// So in the end everything is chronologically in order fn start_reorder( parsed_rx: Receiver<(u32, Vec<TransactionRecord>)>, reorder_tx: Sender<TransactionRecord>, ) { // Ignore the join handle, since the lifetime of the thread is tied to the lifetime of the input and output channels let _ = std::thread::spawn(move || { let mut waiting_for = 1; let mut queue = HashMap::new(); while let Ok(block) = parsed_rx.recv() { if block.0 == waiting_for { for record in block.1 { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; // Clear backlog while let Some(transactions) = queue.remove(&waiting_for) { for record in transactions { if reorder_tx.send(record).is_err()
; } waiting_for += 1; } } else if block.0 > waiting_for { queue.insert(block.0, block.1); } } }); } // Reads a big block until new line alignment fn read_block(&mut self, reader: &mut impl BufRead) -> Option<Vec<u8>> { let mut block = vec![0; self.block_size]; // put additional for adjustments block.reserve(1000); match reader.read(&mut block) { Ok(0) => None, Ok(n) => { block.truncate(n); // do not care if we reach EOF for now let _ = reader.read_until(b'\n', &mut block); Some(block) } Err(_) => None, } } } #[cfg(test)] mod tests { use rust_decimal_macros::dec; use super::*; use crate::records::TransactionType; #[test] fn test_no_file_exists() { let transactions = STBulkReader::new().read_csv("tests/data/non_existent.csv"); assert!(transactions.is_err()); } fn test_transaction_reader(reader: impl TransactionCSVReader, path: &str) { let mut transactions = reader.read_csv(&path).expect("Test file is not found"); // Validate a few fields to give us enough confidence that parsing is successful let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Deposit); let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Withdrawal); assert_eq!(trans.client, 6); assert_eq!(trans.tx, 5); assert_eq!(trans.amount, Some(dec!(9.0))); let trans = transactions.skip(2).next().unwrap(); assert_eq!(trans.tr_type, TransactionType::ChargeBack); assert_eq!(trans.amount, None); } /// Tests that we can read and parse all transactions #[test] fn test_st_bulk_transaction_reader_serde() { let reader = STBulkReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_serde() { let reader = MTReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_big() { let reader = MTReader::new(); let mut transactions = reader .read_csv("tests/data/test_mt_reader.csv") .expect("Test file is not found"); for i in 1..20001 
{ assert_eq!(transactions.next().unwrap().tx, i); } assert!(transactions.next().is_none()); } }
{ return; }
conditional_block
transactions_reader.rs
/// Reads transactions from a CSV file /// Make it a separate file in case we want to add new methods /// such as reading from a non-CSV file and so on use std::{ collections::HashMap, io::{BufRead, BufReader}, path::Path, }; use anyhow::Context; use crossbeam_channel::{Receiver, Sender}; use csv::{ByteRecord, ReaderBuilder, Trim}; use crate::records::TransactionRecord; use log::*; /// A type that represents a stream of transactions arriving into the system /// Many channels (such as crossbeam) implement iterator interface, so can be used for multithreading pub type TransactionsStream = Box<dyn Iterator<Item = TransactionRecord>>; /// Trait to read CSV files into a `TransactionsStream` pub trait TransactionCSVReader { /// Read transactions from a CSV file /// Returns a vector with all the transactions nicely packet into structs fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream>; } /// A single threaded bulk reader /// Reads and parses everything upfront and returns a stream to the records pub struct STBulkReader {} impl STBulkReader { pub fn new() -> Self { Self {} } } impl TransactionCSVReader for STBulkReader { fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream> { let start_time = std::time::Instant::now(); info!("STBulkReader reading the transactions"); let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .flexible(true) .from_path(path)?; // Read as byte records, that should improve the performance without a lot of reallocations let mut raw_record = csv::ByteRecord::new(); let headers = csv_reader.byte_headers()?.clone(); let mut transactions = Vec::new(); while csv_reader.read_byte_record(&mut raw_record)? { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); // for simplicity, ignore transactions that cannot be parsed if let Ok(record) = record { transactions.push(record); } } info!( "Read {} records in {:?}. 
Throughput: {} millions/second", transactions.len(), start_time.elapsed(), transactions.len() as f32 / (1000000.0 * start_time.elapsed().as_secs_f32()) ); Ok(Box::new(transactions.into_iter())) } } /// A multithreaded reader /// Reads blocks of raw bytes from a file (sequentially) /// And then forwards those blocks to a thread pool for deserialization pub struct MTReader { num_threads: usize, block_size: usize, } impl MTReader { pub fn new() -> Self { Self { num_threads: num_cpus::get(), block_size: 32 * 1024, } } pub fn with_threads(mut self, num_threads: usize) -> Self { self.num_threads = num_threads; self } #[allow(dead_code)] pub fn block_size(mut self, block_size: usize) -> Self { self.block_size = block_size; self } } impl TransactionCSVReader for MTReader { fn read_csv<P: AsRef<Path>>(mut self, path: P) -> anyhow::Result<TransactionsStream> { let mut file_reader = BufReader::with_capacity(2 * self.block_size, std::fs::File::open(path)?); let mut headers = vec![]; // read first row file_reader .read_until(b'\n', &mut headers) .with_context(|| "Failed to read the headers")?; let (parsed_tx, parsed_rx) = crossbeam_channel::bounded::<(u32, Vec<TransactionRecord>)>(1000); let (reorder_tx, reorder_rx) = crossbeam_channel::bounded::<TransactionRecord>(100000); let (block_tx, block_rx) = crossbeam_channel::bounded::<(u32, Vec<u8>)>(1000); Self::start_reorder(parsed_rx, reorder_tx); Self::start_dispatcher(self.num_threads, parsed_tx, block_rx); // Read blocks of transactions let _ = std::thread::spawn(move || { let mut block_id = 0; while let Some(block) = self.read_block(&mut file_reader) { block_id += 1; // send them to the thread pool dispatcher if block_tx.send((block_id, block)).is_err() { break; } // the parsed blocks may arrive out of order, so we need to perform a reordering } }); Ok(Box::new(reorder_rx.into_iter())) } } impl MTReader { /// Dispatch a CSV raw block for parsing fn start_dispatcher( num_threads: usize, parsed_tx: Sender<(u32, 
Vec<TransactionRecord>)>, block_rx: Receiver<(u32, Vec<u8>)>, ) { for _ in 0..num_threads { let block_rx = block_rx.clone(); let parsed_tx = parsed_tx.clone(); // For now consider that the headers if read then they're OK and equal to below let headers = ByteRecord::from(vec!["type", "client", "tx", "amount"]); std::thread::spawn(move || { while let Ok((block_id, block)) = block_rx.recv() { let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .has_headers(true) .flexible(true) .from_reader(block.as_slice()); let mut raw_record = csv::ByteRecord::new(); // Looks like I have found a bug in CSV library // It doesn't trim the first row if has_headers = false and the headers are supplied to deserialize // I'll open a bug on github csv_reader.set_byte_headers(headers.clone()); let mut transactions = Vec::new(); while let Ok(true) = csv_reader.read_byte_record(&mut raw_record) { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); if let Ok(record) = record { transactions.push(record); } } // Will ignore the channel closed for now let _ = parsed_tx.send((block_id, transactions)); } }); } } /// Reorders transaction blocks from different thread /// So in the end everything is chronologically in order fn start_reorder( parsed_rx: Receiver<(u32, Vec<TransactionRecord>)>, reorder_tx: Sender<TransactionRecord>, ) { // Ignore the join handle, since the lifetime of the thread is tied to the lifetime of the input and output channels let _ = std::thread::spawn(move || { let mut waiting_for = 1; let mut queue = HashMap::new(); while let Ok(block) = parsed_rx.recv() { if block.0 == waiting_for { for record in block.1 { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; // Clear backlog while let Some(transactions) = queue.remove(&waiting_for) { for record in transactions { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; } } else if block.0 > waiting_for { queue.insert(block.0, block.1); } } }); } // Reads a big block 
until new line alignment fn read_block(&mut self, reader: &mut impl BufRead) -> Option<Vec<u8>> { let mut block = vec![0; self.block_size]; // put additional for adjustments block.reserve(1000); match reader.read(&mut block) { Ok(0) => None, Ok(n) => { block.truncate(n); // do not care if we reach EOF for now let _ = reader.read_until(b'\n', &mut block); Some(block) } Err(_) => None, } } } #[cfg(test)] mod tests { use rust_decimal_macros::dec; use super::*; use crate::records::TransactionType; #[test] fn test_no_file_exists() { let transactions = STBulkReader::new().read_csv("tests/data/non_existent.csv"); assert!(transactions.is_err()); } fn test_transaction_reader(reader: impl TransactionCSVReader, path: &str) { let mut transactions = reader.read_csv(&path).expect("Test file is not found"); // Validate a few fields to give us enough confidence that parsing is successful let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Deposit); let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Withdrawal); assert_eq!(trans.client, 6); assert_eq!(trans.tx, 5); assert_eq!(trans.amount, Some(dec!(9.0))); let trans = transactions.skip(2).next().unwrap(); assert_eq!(trans.tr_type, TransactionType::ChargeBack); assert_eq!(trans.amount, None); }
/// Tests that we can read and parse all transactions #[test] fn test_st_bulk_transaction_reader_serde() { let reader = STBulkReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_serde() { let reader = MTReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_big() { let reader = MTReader::new(); let mut transactions = reader .read_csv("tests/data/test_mt_reader.csv") .expect("Test file is not found"); for i in 1..20001 { assert_eq!(transactions.next().unwrap().tx, i); } assert!(transactions.next().is_none()); } }
random_line_split
transactions_reader.rs
/// Reads transactions from a CSV file /// Make it a separate file in case we want to add new methods /// such as reading from a non-CSV file and so on use std::{ collections::HashMap, io::{BufRead, BufReader}, path::Path, }; use anyhow::Context; use crossbeam_channel::{Receiver, Sender}; use csv::{ByteRecord, ReaderBuilder, Trim}; use crate::records::TransactionRecord; use log::*; /// A type that represents a stream of transactions arriving into the system /// Many channels (such as crossbeam) implement iterator interface, so can be used for multithreading pub type TransactionsStream = Box<dyn Iterator<Item = TransactionRecord>>; /// Trait to read CSV files into a `TransactionsStream` pub trait TransactionCSVReader { /// Read transactions from a CSV file /// Returns a vector with all the transactions nicely packet into structs fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream>; } /// A single threaded bulk reader /// Reads and parses everything upfront and returns a stream to the records pub struct
{} impl STBulkReader { pub fn new() -> Self { Self {} } } impl TransactionCSVReader for STBulkReader { fn read_csv<P: AsRef<Path>>(self, path: P) -> anyhow::Result<TransactionsStream> { let start_time = std::time::Instant::now(); info!("STBulkReader reading the transactions"); let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .flexible(true) .from_path(path)?; // Read as byte records, that should improve the performance without a lot of reallocations let mut raw_record = csv::ByteRecord::new(); let headers = csv_reader.byte_headers()?.clone(); let mut transactions = Vec::new(); while csv_reader.read_byte_record(&mut raw_record)? { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); // for simplicity, ignore transactions that cannot be parsed if let Ok(record) = record { transactions.push(record); } } info!( "Read {} records in {:?}. Throughput: {} millions/second", transactions.len(), start_time.elapsed(), transactions.len() as f32 / (1000000.0 * start_time.elapsed().as_secs_f32()) ); Ok(Box::new(transactions.into_iter())) } } /// A multithreaded reader /// Reads blocks of raw bytes from a file (sequentially) /// And then forwards those blocks to a thread pool for deserialization pub struct MTReader { num_threads: usize, block_size: usize, } impl MTReader { pub fn new() -> Self { Self { num_threads: num_cpus::get(), block_size: 32 * 1024, } } pub fn with_threads(mut self, num_threads: usize) -> Self { self.num_threads = num_threads; self } #[allow(dead_code)] pub fn block_size(mut self, block_size: usize) -> Self { self.block_size = block_size; self } } impl TransactionCSVReader for MTReader { fn read_csv<P: AsRef<Path>>(mut self, path: P) -> anyhow::Result<TransactionsStream> { let mut file_reader = BufReader::with_capacity(2 * self.block_size, std::fs::File::open(path)?); let mut headers = vec![]; // read first row file_reader .read_until(b'\n', &mut headers) .with_context(|| "Failed to read the headers")?; let (parsed_tx, 
parsed_rx) = crossbeam_channel::bounded::<(u32, Vec<TransactionRecord>)>(1000); let (reorder_tx, reorder_rx) = crossbeam_channel::bounded::<TransactionRecord>(100000); let (block_tx, block_rx) = crossbeam_channel::bounded::<(u32, Vec<u8>)>(1000); Self::start_reorder(parsed_rx, reorder_tx); Self::start_dispatcher(self.num_threads, parsed_tx, block_rx); // Read blocks of transactions let _ = std::thread::spawn(move || { let mut block_id = 0; while let Some(block) = self.read_block(&mut file_reader) { block_id += 1; // send them to the thread pool dispatcher if block_tx.send((block_id, block)).is_err() { break; } // the parsed blocks may arrive out of order, so we need to perform a reordering } }); Ok(Box::new(reorder_rx.into_iter())) } } impl MTReader { /// Dispatch a CSV raw block for parsing fn start_dispatcher( num_threads: usize, parsed_tx: Sender<(u32, Vec<TransactionRecord>)>, block_rx: Receiver<(u32, Vec<u8>)>, ) { for _ in 0..num_threads { let block_rx = block_rx.clone(); let parsed_tx = parsed_tx.clone(); // For now consider that the headers if read then they're OK and equal to below let headers = ByteRecord::from(vec!["type", "client", "tx", "amount"]); std::thread::spawn(move || { while let Ok((block_id, block)) = block_rx.recv() { let mut csv_reader = ReaderBuilder::new() .trim(Trim::All) .has_headers(true) .flexible(true) .from_reader(block.as_slice()); let mut raw_record = csv::ByteRecord::new(); // Looks like I have found a bug in CSV library // It doesn't trim the first row if has_headers = false and the headers are supplied to deserialize // I'll open a bug on github csv_reader.set_byte_headers(headers.clone()); let mut transactions = Vec::new(); while let Ok(true) = csv_reader.read_byte_record(&mut raw_record) { let record = raw_record.deserialize::<TransactionRecord>(Some(&headers)); if let Ok(record) = record { transactions.push(record); } } // Will ignore the channel closed for now let _ = parsed_tx.send((block_id, transactions)); } }); } } /// 
Reorders transaction blocks from different thread /// So in the end everything is chronologically in order fn start_reorder( parsed_rx: Receiver<(u32, Vec<TransactionRecord>)>, reorder_tx: Sender<TransactionRecord>, ) { // Ignore the join handle, since the lifetime of the thread is tied to the lifetime of the input and output channels let _ = std::thread::spawn(move || { let mut waiting_for = 1; let mut queue = HashMap::new(); while let Ok(block) = parsed_rx.recv() { if block.0 == waiting_for { for record in block.1 { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; // Clear backlog while let Some(transactions) = queue.remove(&waiting_for) { for record in transactions { if reorder_tx.send(record).is_err() { return; }; } waiting_for += 1; } } else if block.0 > waiting_for { queue.insert(block.0, block.1); } } }); } // Reads a big block until new line alignment fn read_block(&mut self, reader: &mut impl BufRead) -> Option<Vec<u8>> { let mut block = vec![0; self.block_size]; // put additional for adjustments block.reserve(1000); match reader.read(&mut block) { Ok(0) => None, Ok(n) => { block.truncate(n); // do not care if we reach EOF for now let _ = reader.read_until(b'\n', &mut block); Some(block) } Err(_) => None, } } } #[cfg(test)] mod tests { use rust_decimal_macros::dec; use super::*; use crate::records::TransactionType; #[test] fn test_no_file_exists() { let transactions = STBulkReader::new().read_csv("tests/data/non_existent.csv"); assert!(transactions.is_err()); } fn test_transaction_reader(reader: impl TransactionCSVReader, path: &str) { let mut transactions = reader.read_csv(&path).expect("Test file is not found"); // Validate a few fields to give us enough confidence that parsing is successful let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Deposit); let trans = transactions.next().unwrap(); assert_eq!(trans.tr_type, TransactionType::Withdrawal); assert_eq!(trans.client, 6); assert_eq!(trans.tx, 5); 
assert_eq!(trans.amount, Some(dec!(9.0))); let trans = transactions.skip(2).next().unwrap(); assert_eq!(trans.tr_type, TransactionType::ChargeBack); assert_eq!(trans.amount, None); } /// Tests that we can read and parse all transactions #[test] fn test_st_bulk_transaction_reader_serde() { let reader = STBulkReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_serde() { let reader = MTReader::new(); test_transaction_reader(reader, "tests/data/test_serde.csv"); } #[test] fn test_mt_reader_transaction_reader_big() { let reader = MTReader::new(); let mut transactions = reader .read_csv("tests/data/test_mt_reader.csv") .expect("Test file is not found"); for i in 1..20001 { assert_eq!(transactions.next().unwrap().tx, i); } assert!(transactions.next().is_none()); } }
STBulkReader
identifier_name
dir.rs
use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; use std::ffi::OsStr; use std::fmt; use std::io; use std::str; use traits::{self, Dir as DirTrait, Entry as EntryTrait}; use util::VecExt; use vfat::{Attributes, Date, Metadata, Time, Timestamp}; use vfat::{Cluster, Entry, File, Shared, VFat}; #[derive(Debug)] pub struct Dir { vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata, } impl Dir { pub fn new(vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata) -> Dir { Dir { vfat, start, name, metadata, } } pub fn name(&self) -> &str { &self.name } pub fn metadata(&self) -> &Metadata { &self.metadata } /// Finds the entry named `name` in `self` and returns it. Comparison is /// case-insensitive. /// /// # Errors /// /// If no entry with name `name` exists in `self`, an error of `NotFound` is /// returned. /// /// If `name` contains invalid UTF-8 characters, an error of `InvalidInput` /// is returned. pub fn find<P: AsRef<OsStr>>(&self, name: P) -> io::Result<Entry> { let name = name.as_ref().to_str().ok_or(io::Error::new( io::ErrorKind::InvalidInput, "name is not valid utf-8", ))?; let entry = self .entries()? 
.find(|entry| entry.name().eq_ignore_ascii_case(name.as_ref())) .ok_or(io::Error::new( io::ErrorKind::NotFound, format!("{}: not found", name), ))?; Ok(entry) } } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatRegularDirEntry { name: [u8; 8], extension: [u8; 3], attributes: u8, _nt_reserved: u8, _created_time_tenths_second: u8, created_time: u16, created_date: u16, accessed_date: u16, cluster_high: u16, modified_time: u16, modified_date: u16, cluster_low: u16, size: u32, } impl fmt::Debug for VFatRegularDirEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("VFatRegularDirEntry") .field("name", &self.name()) .field("attributes", &self.attributes) .field("created_time", &self.created_time) .field("created_date", &self.created_date) .field("accessed_date", &self.accessed_date) .field("modified_time", &self.modified_time) .field("modified_date", &self.modified_date) .field("cluster", &self.cluster()) .finish() } } impl VFatRegularDirEntry { fn sentinel(&self) -> bool { self.name[0] == 0x00 } fn deleted(&self) -> bool { self.name[0] == 0x05 || self.name[0] == 0xE5 } fn cluster(&self) -> Cluster { Cluster::from(((self.cluster_high as u32) << 16) | (self.cluster_low as u32)) } fn created(&self) -> Timestamp { let date = Date::from_raw(self.created_date); let time = Time::from_raw(self.created_time); Timestamp::new(date, time) } fn accessed(&self) -> Timestamp { let date = Date::from_raw(self.accessed_date); Timestamp::new(date, Default::default()) } fn modified(&self) -> Timestamp { let date = Date::from_raw(self.modified_date); let time = Time::from_raw(self.modified_time); Timestamp::new(date, time) } fn attributes(&self) -> Attributes { Attributes::from_raw(self.attributes) } fn size(&self) -> u64 { self.size as u64 } fn metadata(&self) -> Metadata { let attributes = self.attributes(); let created = self.created(); let accessed = self.accessed(); let modified = self.modified(); let size = self.size(); Metadata { attributes, created, 
accessed, modified, size, } } fn name(&self) -> Option<String> { let &name_stop = &self.name[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.name.len()); let &ext_stop = &self.extension[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.extension.len()); let name = str::from_utf8(&self.name[..name_stop]).ok()?; let extension = str::from_utf8(&self.extension[..ext_stop]).ok()?; if name == "" { return None; } if extension != "" { Some(format!("{}.{}", name, extension)) } else { Some(format!("{}", name)) } } } #[repr(C, packed)] #[derive(Copy, Clone, Debug)] pub struct VFatLfnDirEntry { seqno: u8, name_1: [u16; 5], attributes: u8, _reserved_1: u8, dos_checksum: u8, name_2: [u16; 6], _reserved_2: [u8; 2], name_3: [u16; 2], } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatUnknownDirEntry { _unknown_1: [u8; 11], attributes: u8, _unknown_2: [u8; 20], } pub union VFatDirEntry { unknown: VFatUnknownDirEntry, regular: VFatRegularDirEntry, long_filename: VFatLfnDirEntry, } impl From<VFatRegularDirEntry> for VFatEntry { fn from(regular: VFatRegularDirEntry) -> VFatEntry { VFatEntry::Regular(regular) } } impl From<VFatLfnDirEntry> for VFatEntry { fn from(lfn: VFatLfnDirEntry) -> VFatEntry { VFatEntry::Lfn(lfn) } } impl<'a> From<&'a VFatDirEntry> for VFatEntry { fn from(dir_entry: &'a VFatDirEntry) -> VFatEntry { let attributes = unsafe { dir_entry.unknown.attributes }; let attributes = Attributes::from_raw(attributes); unsafe { match (attributes.lfn(), dir_entry) { (true, &VFatDirEntry { long_filename }) => long_filename.into(), (false, &VFatDirEntry { regular }) => regular.into(), } } } } #[derive(Debug)] enum VFatEntry { Regular(VFatRegularDirEntry), Lfn(VFatLfnDirEntry), } impl VFatEntry { fn regular(&self) -> Option<&VFatRegularDirEntry> { if let &VFatEntry::Regular(ref reg) = self { Some(reg) } else { None } } fn lfn(&self) -> Option<&VFatLfnDirEntry> { if let &VFatEntry::Lfn(ref lfn) = self { Some(lfn) } else { None } } } impl 
traits::Dir for Dir { /// The type of entry stored in this directory. type Entry = Entry; /// An type that is an iterator over the entries in this directory. type Iter = DirIter; /// Returns an interator over the entries in this directory. fn entries(&self) -> io::Result<Self::Iter> { let mut vfat = self.vfat.borrow_mut(); let mut buf = vec![]; vfat.read_chain(self.start, &mut buf, None)?; let buf = unsafe { buf.cast::<VFatDirEntry>() }; Ok(DirIter::new(self.vfat.clone(), buf)) } } pub struct DirIter { vfat: Shared<VFat>, buf: Vec<VFatDirEntry>, current: usize, } impl DirIter { fn new(vfat: Shared<VFat>, buf: Vec<VFatDirEntry>) -> DirIter { DirIter { vfat, buf, current: 0, } } fn name_from_lfn(&self, lfn_start: usize, lfn_stop: usize) -> Option<String> { let mut entries: Vec<VFatLfnDirEntry> = (&self.buf[lfn_start..lfn_stop]) .iter() .rev() .map(|entry| entry.into()) // first ensure that we stop at the preceding regular in the array .take_while(|entry| { if let &VFatEntry::Lfn(_) = entry { true } else
}).filter_map(|entry| match entry.lfn() { Some(lfn) if lfn.seqno != 0xE5 => Some(*lfn), _ => None, }).collect(); entries.sort_by_key(|lfn| lfn.seqno); let mut name: Vec<u16> = vec![]; for &lfn in entries.iter() { name.extend(lfn.name_1.iter()); name.extend(lfn.name_2.iter()); name.extend(lfn.name_3.iter()); } let end = name .iter() .position(|&c| c == 0x0000u16) .unwrap_or(name.len()); let s = decode_utf16((&name[..end]).iter().cloned()) .map(|c| c.unwrap_or(REPLACEMENT_CHARACTER)) .collect::<String>(); if s.is_empty() { None } else { Some(s) } } } impl Iterator for DirIter { type Item = Entry; fn next(&mut self) -> Option<Self::Item> { if self.current >= self.buf.len() { return None; } let (regular_index, regular, name) = (&self.buf)[self.current..] .iter() .enumerate() .filter_map(|(i, union_entry)| { let index = self.current + i; let entry: VFatEntry = union_entry.into(); let regular = entry.regular()?; if !regular.deleted() && !regular.sentinel() { Some((index, *regular)) } else { None } }).next() .and_then(|(regular_index, regular)| { let name = if self.current < regular_index { self.name_from_lfn(self.current, regular_index) } else { None }; let name = name.or_else(|| regular.name())?; Some((regular_index, regular, name)) })?; self.current = regular_index + 1; let metadata = regular.metadata(); let start = regular.cluster(); let vfat = self.vfat.clone(); if metadata.attributes.directory() { Some(Entry::Dir(Dir::new(vfat, start, name, metadata))) } else { Some(Entry::File(File::new(vfat, start, name, metadata))) } } }
{ false }
conditional_block
dir.rs
use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; use std::ffi::OsStr; use std::fmt; use std::io; use std::str; use traits::{self, Dir as DirTrait, Entry as EntryTrait}; use util::VecExt; use vfat::{Attributes, Date, Metadata, Time, Timestamp}; use vfat::{Cluster, Entry, File, Shared, VFat}; #[derive(Debug)] pub struct Dir { vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata, } impl Dir { pub fn new(vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata) -> Dir { Dir { vfat, start, name, metadata, } } pub fn name(&self) -> &str { &self.name } pub fn metadata(&self) -> &Metadata { &self.metadata } /// Finds the entry named `name` in `self` and returns it. Comparison is /// case-insensitive. /// /// # Errors /// /// If no entry with name `name` exists in `self`, an error of `NotFound` is /// returned. /// /// If `name` contains invalid UTF-8 characters, an error of `InvalidInput` /// is returned. pub fn find<P: AsRef<OsStr>>(&self, name: P) -> io::Result<Entry> { let name = name.as_ref().to_str().ok_or(io::Error::new( io::ErrorKind::InvalidInput, "name is not valid utf-8", ))?; let entry = self .entries()? 
.find(|entry| entry.name().eq_ignore_ascii_case(name.as_ref())) .ok_or(io::Error::new( io::ErrorKind::NotFound, format!("{}: not found", name), ))?; Ok(entry) } } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatRegularDirEntry { name: [u8; 8], extension: [u8; 3], attributes: u8, _nt_reserved: u8, _created_time_tenths_second: u8, created_time: u16, created_date: u16, accessed_date: u16, cluster_high: u16, modified_time: u16, modified_date: u16, cluster_low: u16, size: u32, } impl fmt::Debug for VFatRegularDirEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("VFatRegularDirEntry") .field("name", &self.name()) .field("attributes", &self.attributes) .field("created_time", &self.created_time) .field("created_date", &self.created_date) .field("accessed_date", &self.accessed_date) .field("modified_time", &self.modified_time) .field("modified_date", &self.modified_date) .field("cluster", &self.cluster()) .finish() } } impl VFatRegularDirEntry { fn sentinel(&self) -> bool { self.name[0] == 0x00 } fn deleted(&self) -> bool { self.name[0] == 0x05 || self.name[0] == 0xE5 } fn cluster(&self) -> Cluster { Cluster::from(((self.cluster_high as u32) << 16) | (self.cluster_low as u32)) } fn created(&self) -> Timestamp { let date = Date::from_raw(self.created_date); let time = Time::from_raw(self.created_time); Timestamp::new(date, time) } fn accessed(&self) -> Timestamp { let date = Date::from_raw(self.accessed_date); Timestamp::new(date, Default::default()) } fn modified(&self) -> Timestamp { let date = Date::from_raw(self.modified_date); let time = Time::from_raw(self.modified_time); Timestamp::new(date, time) } fn attributes(&self) -> Attributes { Attributes::from_raw(self.attributes) } fn size(&self) -> u64 { self.size as u64 } fn metadata(&self) -> Metadata { let attributes = self.attributes(); let created = self.created(); let accessed = self.accessed(); let modified = self.modified(); let size = self.size(); Metadata { attributes, created, 
accessed, modified, size, } } fn name(&self) -> Option<String> { let &name_stop = &self.name[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.name.len()); let &ext_stop = &self.extension[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.extension.len()); let name = str::from_utf8(&self.name[..name_stop]).ok()?; let extension = str::from_utf8(&self.extension[..ext_stop]).ok()?; if name == "" { return None; } if extension != "" { Some(format!("{}.{}", name, extension)) } else { Some(format!("{}", name)) }
#[repr(C, packed)] #[derive(Copy, Clone, Debug)] pub struct VFatLfnDirEntry { seqno: u8, name_1: [u16; 5], attributes: u8, _reserved_1: u8, dos_checksum: u8, name_2: [u16; 6], _reserved_2: [u8; 2], name_3: [u16; 2], } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatUnknownDirEntry { _unknown_1: [u8; 11], attributes: u8, _unknown_2: [u8; 20], } pub union VFatDirEntry { unknown: VFatUnknownDirEntry, regular: VFatRegularDirEntry, long_filename: VFatLfnDirEntry, } impl From<VFatRegularDirEntry> for VFatEntry { fn from(regular: VFatRegularDirEntry) -> VFatEntry { VFatEntry::Regular(regular) } } impl From<VFatLfnDirEntry> for VFatEntry { fn from(lfn: VFatLfnDirEntry) -> VFatEntry { VFatEntry::Lfn(lfn) } } impl<'a> From<&'a VFatDirEntry> for VFatEntry { fn from(dir_entry: &'a VFatDirEntry) -> VFatEntry { let attributes = unsafe { dir_entry.unknown.attributes }; let attributes = Attributes::from_raw(attributes); unsafe { match (attributes.lfn(), dir_entry) { (true, &VFatDirEntry { long_filename }) => long_filename.into(), (false, &VFatDirEntry { regular }) => regular.into(), } } } } #[derive(Debug)] enum VFatEntry { Regular(VFatRegularDirEntry), Lfn(VFatLfnDirEntry), } impl VFatEntry { fn regular(&self) -> Option<&VFatRegularDirEntry> { if let &VFatEntry::Regular(ref reg) = self { Some(reg) } else { None } } fn lfn(&self) -> Option<&VFatLfnDirEntry> { if let &VFatEntry::Lfn(ref lfn) = self { Some(lfn) } else { None } } } impl traits::Dir for Dir { /// The type of entry stored in this directory. type Entry = Entry; /// An type that is an iterator over the entries in this directory. type Iter = DirIter; /// Returns an interator over the entries in this directory. 
fn entries(&self) -> io::Result<Self::Iter> { let mut vfat = self.vfat.borrow_mut(); let mut buf = vec![]; vfat.read_chain(self.start, &mut buf, None)?; let buf = unsafe { buf.cast::<VFatDirEntry>() }; Ok(DirIter::new(self.vfat.clone(), buf)) } } pub struct DirIter { vfat: Shared<VFat>, buf: Vec<VFatDirEntry>, current: usize, } impl DirIter { fn new(vfat: Shared<VFat>, buf: Vec<VFatDirEntry>) -> DirIter { DirIter { vfat, buf, current: 0, } } fn name_from_lfn(&self, lfn_start: usize, lfn_stop: usize) -> Option<String> { let mut entries: Vec<VFatLfnDirEntry> = (&self.buf[lfn_start..lfn_stop]) .iter() .rev() .map(|entry| entry.into()) // first ensure that we stop at the preceding regular in the array .take_while(|entry| { if let &VFatEntry::Lfn(_) = entry { true } else { false } }).filter_map(|entry| match entry.lfn() { Some(lfn) if lfn.seqno != 0xE5 => Some(*lfn), _ => None, }).collect(); entries.sort_by_key(|lfn| lfn.seqno); let mut name: Vec<u16> = vec![]; for &lfn in entries.iter() { name.extend(lfn.name_1.iter()); name.extend(lfn.name_2.iter()); name.extend(lfn.name_3.iter()); } let end = name .iter() .position(|&c| c == 0x0000u16) .unwrap_or(name.len()); let s = decode_utf16((&name[..end]).iter().cloned()) .map(|c| c.unwrap_or(REPLACEMENT_CHARACTER)) .collect::<String>(); if s.is_empty() { None } else { Some(s) } } } impl Iterator for DirIter { type Item = Entry; fn next(&mut self) -> Option<Self::Item> { if self.current >= self.buf.len() { return None; } let (regular_index, regular, name) = (&self.buf)[self.current..] 
.iter() .enumerate() .filter_map(|(i, union_entry)| { let index = self.current + i; let entry: VFatEntry = union_entry.into(); let regular = entry.regular()?; if !regular.deleted() && !regular.sentinel() { Some((index, *regular)) } else { None } }).next() .and_then(|(regular_index, regular)| { let name = if self.current < regular_index { self.name_from_lfn(self.current, regular_index) } else { None }; let name = name.or_else(|| regular.name())?; Some((regular_index, regular, name)) })?; self.current = regular_index + 1; let metadata = regular.metadata(); let start = regular.cluster(); let vfat = self.vfat.clone(); if metadata.attributes.directory() { Some(Entry::Dir(Dir::new(vfat, start, name, metadata))) } else { Some(Entry::File(File::new(vfat, start, name, metadata))) } } }
} }
random_line_split
dir.rs
use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; use std::ffi::OsStr; use std::fmt; use std::io; use std::str; use traits::{self, Dir as DirTrait, Entry as EntryTrait}; use util::VecExt; use vfat::{Attributes, Date, Metadata, Time, Timestamp}; use vfat::{Cluster, Entry, File, Shared, VFat}; #[derive(Debug)] pub struct Dir { vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata, } impl Dir { pub fn new(vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata) -> Dir { Dir { vfat, start, name, metadata, } } pub fn name(&self) -> &str { &self.name } pub fn metadata(&self) -> &Metadata { &self.metadata } /// Finds the entry named `name` in `self` and returns it. Comparison is /// case-insensitive. /// /// # Errors /// /// If no entry with name `name` exists in `self`, an error of `NotFound` is /// returned. /// /// If `name` contains invalid UTF-8 characters, an error of `InvalidInput` /// is returned. pub fn find<P: AsRef<OsStr>>(&self, name: P) -> io::Result<Entry> { let name = name.as_ref().to_str().ok_or(io::Error::new( io::ErrorKind::InvalidInput, "name is not valid utf-8", ))?; let entry = self .entries()? 
.find(|entry| entry.name().eq_ignore_ascii_case(name.as_ref())) .ok_or(io::Error::new( io::ErrorKind::NotFound, format!("{}: not found", name), ))?; Ok(entry) } } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatRegularDirEntry { name: [u8; 8], extension: [u8; 3], attributes: u8, _nt_reserved: u8, _created_time_tenths_second: u8, created_time: u16, created_date: u16, accessed_date: u16, cluster_high: u16, modified_time: u16, modified_date: u16, cluster_low: u16, size: u32, } impl fmt::Debug for VFatRegularDirEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("VFatRegularDirEntry") .field("name", &self.name()) .field("attributes", &self.attributes) .field("created_time", &self.created_time) .field("created_date", &self.created_date) .field("accessed_date", &self.accessed_date) .field("modified_time", &self.modified_time) .field("modified_date", &self.modified_date) .field("cluster", &self.cluster()) .finish() } } impl VFatRegularDirEntry { fn sentinel(&self) -> bool { self.name[0] == 0x00 } fn deleted(&self) -> bool { self.name[0] == 0x05 || self.name[0] == 0xE5 } fn cluster(&self) -> Cluster { Cluster::from(((self.cluster_high as u32) << 16) | (self.cluster_low as u32)) } fn created(&self) -> Timestamp { let date = Date::from_raw(self.created_date); let time = Time::from_raw(self.created_time); Timestamp::new(date, time) } fn accessed(&self) -> Timestamp { let date = Date::from_raw(self.accessed_date); Timestamp::new(date, Default::default()) } fn modified(&self) -> Timestamp { let date = Date::from_raw(self.modified_date); let time = Time::from_raw(self.modified_time); Timestamp::new(date, time) } fn attributes(&self) -> Attributes { Attributes::from_raw(self.attributes) } fn size(&self) -> u64 { self.size as u64 } fn metadata(&self) -> Metadata { let attributes = self.attributes(); let created = self.created(); let accessed = self.accessed(); let modified = self.modified(); let size = self.size(); Metadata { attributes, created, 
accessed, modified, size, } } fn name(&self) -> Option<String> { let &name_stop = &self.name[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.name.len()); let &ext_stop = &self.extension[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.extension.len()); let name = str::from_utf8(&self.name[..name_stop]).ok()?; let extension = str::from_utf8(&self.extension[..ext_stop]).ok()?; if name == "" { return None; } if extension != "" { Some(format!("{}.{}", name, extension)) } else { Some(format!("{}", name)) } } } #[repr(C, packed)] #[derive(Copy, Clone, Debug)] pub struct VFatLfnDirEntry { seqno: u8, name_1: [u16; 5], attributes: u8, _reserved_1: u8, dos_checksum: u8, name_2: [u16; 6], _reserved_2: [u8; 2], name_3: [u16; 2], } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatUnknownDirEntry { _unknown_1: [u8; 11], attributes: u8, _unknown_2: [u8; 20], } pub union VFatDirEntry { unknown: VFatUnknownDirEntry, regular: VFatRegularDirEntry, long_filename: VFatLfnDirEntry, } impl From<VFatRegularDirEntry> for VFatEntry { fn from(regular: VFatRegularDirEntry) -> VFatEntry { VFatEntry::Regular(regular) } } impl From<VFatLfnDirEntry> for VFatEntry { fn from(lfn: VFatLfnDirEntry) -> VFatEntry { VFatEntry::Lfn(lfn) } } impl<'a> From<&'a VFatDirEntry> for VFatEntry { fn from(dir_entry: &'a VFatDirEntry) -> VFatEntry { let attributes = unsafe { dir_entry.unknown.attributes }; let attributes = Attributes::from_raw(attributes); unsafe { match (attributes.lfn(), dir_entry) { (true, &VFatDirEntry { long_filename }) => long_filename.into(), (false, &VFatDirEntry { regular }) => regular.into(), } } } } #[derive(Debug)] enum VFatEntry { Regular(VFatRegularDirEntry), Lfn(VFatLfnDirEntry), } impl VFatEntry { fn regular(&self) -> Option<&VFatRegularDirEntry> { if let &VFatEntry::Regular(ref reg) = self { Some(reg) } else { None } } fn lfn(&self) -> Option<&VFatLfnDirEntry> { if let &VFatEntry::Lfn(ref lfn) = self { Some(lfn) } else { None } } } impl 
traits::Dir for Dir { /// The type of entry stored in this directory. type Entry = Entry; /// An type that is an iterator over the entries in this directory. type Iter = DirIter; /// Returns an interator over the entries in this directory. fn entries(&self) -> io::Result<Self::Iter> { let mut vfat = self.vfat.borrow_mut(); let mut buf = vec![]; vfat.read_chain(self.start, &mut buf, None)?; let buf = unsafe { buf.cast::<VFatDirEntry>() }; Ok(DirIter::new(self.vfat.clone(), buf)) } } pub struct DirIter { vfat: Shared<VFat>, buf: Vec<VFatDirEntry>, current: usize, } impl DirIter { fn new(vfat: Shared<VFat>, buf: Vec<VFatDirEntry>) -> DirIter { DirIter { vfat, buf, current: 0, } } fn name_from_lfn(&self, lfn_start: usize, lfn_stop: usize) -> Option<String> { let mut entries: Vec<VFatLfnDirEntry> = (&self.buf[lfn_start..lfn_stop]) .iter() .rev() .map(|entry| entry.into()) // first ensure that we stop at the preceding regular in the array .take_while(|entry| { if let &VFatEntry::Lfn(_) = entry { true } else { false } }).filter_map(|entry| match entry.lfn() { Some(lfn) if lfn.seqno != 0xE5 => Some(*lfn), _ => None, }).collect(); entries.sort_by_key(|lfn| lfn.seqno); let mut name: Vec<u16> = vec![]; for &lfn in entries.iter() { name.extend(lfn.name_1.iter()); name.extend(lfn.name_2.iter()); name.extend(lfn.name_3.iter()); } let end = name .iter() .position(|&c| c == 0x0000u16) .unwrap_or(name.len()); let s = decode_utf16((&name[..end]).iter().cloned()) .map(|c| c.unwrap_or(REPLACEMENT_CHARACTER)) .collect::<String>(); if s.is_empty() { None } else { Some(s) } } } impl Iterator for DirIter { type Item = Entry; fn
(&mut self) -> Option<Self::Item> { if self.current >= self.buf.len() { return None; } let (regular_index, regular, name) = (&self.buf)[self.current..] .iter() .enumerate() .filter_map(|(i, union_entry)| { let index = self.current + i; let entry: VFatEntry = union_entry.into(); let regular = entry.regular()?; if !regular.deleted() && !regular.sentinel() { Some((index, *regular)) } else { None } }).next() .and_then(|(regular_index, regular)| { let name = if self.current < regular_index { self.name_from_lfn(self.current, regular_index) } else { None }; let name = name.or_else(|| regular.name())?; Some((regular_index, regular, name)) })?; self.current = regular_index + 1; let metadata = regular.metadata(); let start = regular.cluster(); let vfat = self.vfat.clone(); if metadata.attributes.directory() { Some(Entry::Dir(Dir::new(vfat, start, name, metadata))) } else { Some(Entry::File(File::new(vfat, start, name, metadata))) } } }
next
identifier_name
dir.rs
use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; use std::ffi::OsStr; use std::fmt; use std::io; use std::str; use traits::{self, Dir as DirTrait, Entry as EntryTrait}; use util::VecExt; use vfat::{Attributes, Date, Metadata, Time, Timestamp}; use vfat::{Cluster, Entry, File, Shared, VFat}; #[derive(Debug)] pub struct Dir { vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata, } impl Dir { pub fn new(vfat: Shared<VFat>, start: Cluster, name: String, metadata: Metadata) -> Dir { Dir { vfat, start, name, metadata, } } pub fn name(&self) -> &str { &self.name } pub fn metadata(&self) -> &Metadata { &self.metadata } /// Finds the entry named `name` in `self` and returns it. Comparison is /// case-insensitive. /// /// # Errors /// /// If no entry with name `name` exists in `self`, an error of `NotFound` is /// returned. /// /// If `name` contains invalid UTF-8 characters, an error of `InvalidInput` /// is returned. pub fn find<P: AsRef<OsStr>>(&self, name: P) -> io::Result<Entry> { let name = name.as_ref().to_str().ok_or(io::Error::new( io::ErrorKind::InvalidInput, "name is not valid utf-8", ))?; let entry = self .entries()? 
.find(|entry| entry.name().eq_ignore_ascii_case(name.as_ref())) .ok_or(io::Error::new( io::ErrorKind::NotFound, format!("{}: not found", name), ))?; Ok(entry) } } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatRegularDirEntry { name: [u8; 8], extension: [u8; 3], attributes: u8, _nt_reserved: u8, _created_time_tenths_second: u8, created_time: u16, created_date: u16, accessed_date: u16, cluster_high: u16, modified_time: u16, modified_date: u16, cluster_low: u16, size: u32, } impl fmt::Debug for VFatRegularDirEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("VFatRegularDirEntry") .field("name", &self.name()) .field("attributes", &self.attributes) .field("created_time", &self.created_time) .field("created_date", &self.created_date) .field("accessed_date", &self.accessed_date) .field("modified_time", &self.modified_time) .field("modified_date", &self.modified_date) .field("cluster", &self.cluster()) .finish() } } impl VFatRegularDirEntry { fn sentinel(&self) -> bool { self.name[0] == 0x00 } fn deleted(&self) -> bool
fn cluster(&self) -> Cluster { Cluster::from(((self.cluster_high as u32) << 16) | (self.cluster_low as u32)) } fn created(&self) -> Timestamp { let date = Date::from_raw(self.created_date); let time = Time::from_raw(self.created_time); Timestamp::new(date, time) } fn accessed(&self) -> Timestamp { let date = Date::from_raw(self.accessed_date); Timestamp::new(date, Default::default()) } fn modified(&self) -> Timestamp { let date = Date::from_raw(self.modified_date); let time = Time::from_raw(self.modified_time); Timestamp::new(date, time) } fn attributes(&self) -> Attributes { Attributes::from_raw(self.attributes) } fn size(&self) -> u64 { self.size as u64 } fn metadata(&self) -> Metadata { let attributes = self.attributes(); let created = self.created(); let accessed = self.accessed(); let modified = self.modified(); let size = self.size(); Metadata { attributes, created, accessed, modified, size, } } fn name(&self) -> Option<String> { let &name_stop = &self.name[..] .iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.name.len()); let &ext_stop = &self.extension[..] 
.iter() .position(|&c| c == 0x00 || c == b' ') .unwrap_or(self.extension.len()); let name = str::from_utf8(&self.name[..name_stop]).ok()?; let extension = str::from_utf8(&self.extension[..ext_stop]).ok()?; if name == "" { return None; } if extension != "" { Some(format!("{}.{}", name, extension)) } else { Some(format!("{}", name)) } } } #[repr(C, packed)] #[derive(Copy, Clone, Debug)] pub struct VFatLfnDirEntry { seqno: u8, name_1: [u16; 5], attributes: u8, _reserved_1: u8, dos_checksum: u8, name_2: [u16; 6], _reserved_2: [u8; 2], name_3: [u16; 2], } #[repr(C, packed)] #[derive(Copy, Clone)] pub struct VFatUnknownDirEntry { _unknown_1: [u8; 11], attributes: u8, _unknown_2: [u8; 20], } pub union VFatDirEntry { unknown: VFatUnknownDirEntry, regular: VFatRegularDirEntry, long_filename: VFatLfnDirEntry, } impl From<VFatRegularDirEntry> for VFatEntry { fn from(regular: VFatRegularDirEntry) -> VFatEntry { VFatEntry::Regular(regular) } } impl From<VFatLfnDirEntry> for VFatEntry { fn from(lfn: VFatLfnDirEntry) -> VFatEntry { VFatEntry::Lfn(lfn) } } impl<'a> From<&'a VFatDirEntry> for VFatEntry { fn from(dir_entry: &'a VFatDirEntry) -> VFatEntry { let attributes = unsafe { dir_entry.unknown.attributes }; let attributes = Attributes::from_raw(attributes); unsafe { match (attributes.lfn(), dir_entry) { (true, &VFatDirEntry { long_filename }) => long_filename.into(), (false, &VFatDirEntry { regular }) => regular.into(), } } } } #[derive(Debug)] enum VFatEntry { Regular(VFatRegularDirEntry), Lfn(VFatLfnDirEntry), } impl VFatEntry { fn regular(&self) -> Option<&VFatRegularDirEntry> { if let &VFatEntry::Regular(ref reg) = self { Some(reg) } else { None } } fn lfn(&self) -> Option<&VFatLfnDirEntry> { if let &VFatEntry::Lfn(ref lfn) = self { Some(lfn) } else { None } } } impl traits::Dir for Dir { /// The type of entry stored in this directory. type Entry = Entry; /// An type that is an iterator over the entries in this directory. 
type Iter = DirIter; /// Returns an interator over the entries in this directory. fn entries(&self) -> io::Result<Self::Iter> { let mut vfat = self.vfat.borrow_mut(); let mut buf = vec![]; vfat.read_chain(self.start, &mut buf, None)?; let buf = unsafe { buf.cast::<VFatDirEntry>() }; Ok(DirIter::new(self.vfat.clone(), buf)) } } pub struct DirIter { vfat: Shared<VFat>, buf: Vec<VFatDirEntry>, current: usize, } impl DirIter { fn new(vfat: Shared<VFat>, buf: Vec<VFatDirEntry>) -> DirIter { DirIter { vfat, buf, current: 0, } } fn name_from_lfn(&self, lfn_start: usize, lfn_stop: usize) -> Option<String> { let mut entries: Vec<VFatLfnDirEntry> = (&self.buf[lfn_start..lfn_stop]) .iter() .rev() .map(|entry| entry.into()) // first ensure that we stop at the preceding regular in the array .take_while(|entry| { if let &VFatEntry::Lfn(_) = entry { true } else { false } }).filter_map(|entry| match entry.lfn() { Some(lfn) if lfn.seqno != 0xE5 => Some(*lfn), _ => None, }).collect(); entries.sort_by_key(|lfn| lfn.seqno); let mut name: Vec<u16> = vec![]; for &lfn in entries.iter() { name.extend(lfn.name_1.iter()); name.extend(lfn.name_2.iter()); name.extend(lfn.name_3.iter()); } let end = name .iter() .position(|&c| c == 0x0000u16) .unwrap_or(name.len()); let s = decode_utf16((&name[..end]).iter().cloned()) .map(|c| c.unwrap_or(REPLACEMENT_CHARACTER)) .collect::<String>(); if s.is_empty() { None } else { Some(s) } } } impl Iterator for DirIter { type Item = Entry; fn next(&mut self) -> Option<Self::Item> { if self.current >= self.buf.len() { return None; } let (regular_index, regular, name) = (&self.buf)[self.current..] 
.iter() .enumerate() .filter_map(|(i, union_entry)| { let index = self.current + i; let entry: VFatEntry = union_entry.into(); let regular = entry.regular()?; if !regular.deleted() && !regular.sentinel() { Some((index, *regular)) } else { None } }).next() .and_then(|(regular_index, regular)| { let name = if self.current < regular_index { self.name_from_lfn(self.current, regular_index) } else { None }; let name = name.or_else(|| regular.name())?; Some((regular_index, regular, name)) })?; self.current = regular_index + 1; let metadata = regular.metadata(); let start = regular.cluster(); let vfat = self.vfat.clone(); if metadata.attributes.directory() { Some(Entry::Dir(Dir::new(vfat, start, name, metadata))) } else { Some(Entry::File(File::new(vfat, start, name, metadata))) } } }
{ self.name[0] == 0x05 || self.name[0] == 0xE5 }
identifier_body
mod.rs
use amethyst::{ animation::{AnimationBundle, VertexSkinningBundle}, assets::{ AssetStorage, Handle, Loader, PrefabLoader, PrefabLoaderSystemDesc, RonFormat, }, controls::{ControlTagPrefab, FlyControlBundle}, core::{ HideHierarchySystemDesc, Parent, transform::{ Transform, TransformBundle, }, }, ecs::{ prelude::*, storage::{ GenericReadStorage, GenericWriteStorage, }, }, audio::AudioBundle, Error, gltf::GltfSceneLoaderSystemDesc, input::{InputBundle, StringBindings}, prelude::*, renderer::{ plugins::{RenderPbr3D, RenderSkybox, RenderToWindow}, RenderingBundle, Texture, types::{DefaultBackend, Mesh, MeshData}, }, ui::{RenderUi, UiBundle}, utils::{ application_root_dir, auto_fov::AutoFovSystem, tag::{Tag, TagFinder}, }, }; // Our own rendering plugins. use amethyst_particle::ParticleRender; use combat_render::flash::pass::FlashRender; use space_render::AtmosphereRender; use space_render::cosmos::{Cosmos, CosmosRender}; use space_render::StarRender; pub use action::Action; pub use add_to_limit::*; use crate::game; use crate::game::combat::process::Principal; use crate::game::ui::font::GameFonts; use crate::state::*; use rand::Rng; use crate::game::character::{CharacterStore, CharacterRole}; use crate::game::combat::ability::{AbilityList, AbilityUsability}; use crate::game::combat::ability::charge::ChargeAbility; use crate::game::combat::ability::spawn::SpawnAbility; use crate::game::combat::ability::hack::HackAbility; use crate::game::combat::ability::barrage::BarrageAbility; use crate::game::combat::ability::twin_shot::TwinShotAbility; use crate::game::combat::ability::focused_charge::FocusedChargeAbility; use crate::game::combat::ability::snipe::SnipeAbility; use crate::game::combat::ability::annihilate::AnnihilateAbility; use crate::game::map::CombatStore; use crate::game::combat::{CombatData, Wave}; use crate::game::CoreGameBundle; use crate::core::rebuild_pass::RebuildPlugin; pub mod action; pub mod activity; pub mod rebuild_pass; mod add_to_limit; /// Builds the 
application object for the game. /// This contains the built ECS data, which has all the required logic registered. pub fn build_application<'a, 'b, 'c, S: State<AggregateData<'a, 'b>, StateEvent> + 'c>(initial_state: S) -> Result<Application<'c, AggregateData<'a, 'b>>, Error> { // Get the application root directory for asset loading. let app_root = application_root_dir()?; // Add our meshes directory to the asset loader. let assets_dir = app_root.join("assets"); // Load display config let display_config_path = app_root.join("config\\display.ron"); let mut world: World = World::new(); let game_data = AggregateDataBuilder::new() // GAME SYSTEMS - these are only run when actually 'in' the game world, such as game logic and input systems. .with_combat( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::combat::CombatBundle)? .with_bundle(game::character::CharacterBundle)? ) .with_map( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::map::MapBundle)? ) // UI SYSTEMS - these should be ui specific and should not be required if not running with a UI. // TODO: implement UI .with_ui( GameDataBuilder::new() .with_system_desc( crate::game::control::camera::menu::MenuCameraSystemDesc::default(), "menu_camera", &[], ) ) // CORE SYSTEMS - these should be present at all times during the lifecycle of the game. // This allows us to load assets at any time, even when not in the game state. // FOV will update wen window is resized. //.with_core(AutoFovSystem::default(), "auto_fov", &[]) .with_core( GameDataBuilder::new() // Input bundle. .with_bundle(InputBundle::<StringBindings>::new())? // Automatic FOV and aspect ration modifications .with( AutoFovSystem::new(), "auto_fov", &[], ) // Register drone loader. // .with_system_desc( // PrefabLoaderSystemDesc::<game::drone::DronePrefabData>::default(), // "drone_loader", // &[], // ) // Register map loader. 
.with_system_desc( PrefabLoaderSystemDesc::<game::map::MapPrefabData>::default(), "map_loader", &[], ) // Register scene loader. .with_system_desc( PrefabLoaderSystemDesc::<game::map::WorldPrefabData>::default(), "scene_loader", &[], ) // Register character loader. .with_system_desc( PrefabLoaderSystemDesc::<game::character::CharacterPrefabData>::default(), "character_loader", &[], ) // 3D asset loading using the gltf format. .with_system_desc( GltfSceneLoaderSystemDesc::default(), "gltf_loader", &["map_loader", "scene_loader", "character_loader"], // This is important so that entity instantiation is performed in a single frame. ) // Animation system. // .with_bundle( // AnimationBundle::<usize, Transform>::new("animation_control", "sampler_interpolation") // .with_dep(&["gltf_loader"]), // )? // Basic transforms. .with_bundle(TransformBundle::new().with_dep(&[ //"animation_control", //"sampler_interpolation", ]))? // Vertex skinning (applying bones to vertices) and manipulating. // .with_bundle(VertexSkinningBundle::new().with_dep(&[ // "transform_system", // //"animation_control", // "sampler_interpolation", // ]))? // Amethyst core UI bundle - this requires other systems within the dispatcher to function properly. .with_bundle(UiBundle::<StringBindings>::new())? // Audio bundle .with_bundle(AudioBundle::default())? // RENDER SYSTEMS - these should be required only for rendering abstract data to the screen. // Add the render bundle. .with_bundle(CoreGameBundle)? .with_system_desc( HideHierarchySystemDesc::default(), "hide_hierarchy", &[], ) .with_bundle( RenderingBundle::<DefaultBackend>::new() // Clear color is black - for custom background colors (e.g. for ui) extra geometry will need to be rendered. 
.with_plugin(RenderToWindow::from_config_path(display_config_path)?.with_clear([0.0, 0.0, 0.0, 0.0])) .with_plugin(RenderPbr3D::default()) // Our own custom plugins .with_plugin(CosmosRender::new(Some(Cosmos::default()))) .with_plugin(FlashRender::new("textures/flash_billboard.png")) .with_plugin(StarRender::new("textures/star_glow.png")) .with_plugin(AtmosphereRender::default()) .with_plugin(ParticleRender::default()) .with_plugin(RebuildPlugin::default()) // Ui rendering. .with_plugin(RenderUi::default()) )? ); Ok(Application::new(assets_dir, initial_state, game_data)?) } pub fn get_root<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(&'a T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity) { Some((component, entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root( parents, components, parent_ent, ) } } pub fn get_root_cloned<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(T, Entity)> where T: Component + Clone, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity) { Some((component.clone(), entity)) } else {
let parent_ent: Entity = parents.get(entity)?.entity; get_root_cloned( parents, components, parent_ent, ) } } pub fn get_root_mut<'s, 'a, T, P, C>( parents: &P, components: &'a mut C, entity: Entity, ) -> Option<(&'a mut T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericWriteStorage<Component=T> { if components.get_mut(entity).is_some() { Some((components.get_mut(entity).unwrap(), entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_mut( parents, components, parent_ent, ) } } pub fn load_texture(world: &mut World, path: impl Into<String>) -> Handle<Texture> { if !world.has_value::<AssetStorage::<Texture>>() { world.insert(AssetStorage::<Texture>::new()); } let loader = world.read_resource::<Loader>(); loader.load( path, amethyst::renderer::formats::texture::ImageFormat::default(), (), &world.read_resource::<AssetStorage<Texture>>(), ) } /// Rolls a 'dice' based on the specified chance. pub fn roll(chance: f32) -> bool { let mut rng = rand::thread_rng(); let value: f32 = rng.gen_range(0.0, 1.0); if value < chance { true } else { false } } pub fn select_rng(values: &[f32]) -> Option<usize> { if values.is_empty() { return None; } let mut total: f32 = 0.0; for value in values { total += *value; } if total <= 0.0 { return None; } let mut rng = rand::thread_rng(); let random: f32 = rng.gen_range(0.0, total); let mut current: f32 = 0.0; for (i, value) in values.iter().enumerate() { if *value != 0.0 { let next: f32 = current + *value; if random >= current && random < next { return Some(i); } current = next; } } panic!("No value selected. The slice must contain non zero values..."); }
random_line_split
mod.rs
use amethyst::{ animation::{AnimationBundle, VertexSkinningBundle}, assets::{ AssetStorage, Handle, Loader, PrefabLoader, PrefabLoaderSystemDesc, RonFormat, }, controls::{ControlTagPrefab, FlyControlBundle}, core::{ HideHierarchySystemDesc, Parent, transform::{ Transform, TransformBundle, }, }, ecs::{ prelude::*, storage::{ GenericReadStorage, GenericWriteStorage, }, }, audio::AudioBundle, Error, gltf::GltfSceneLoaderSystemDesc, input::{InputBundle, StringBindings}, prelude::*, renderer::{ plugins::{RenderPbr3D, RenderSkybox, RenderToWindow}, RenderingBundle, Texture, types::{DefaultBackend, Mesh, MeshData}, }, ui::{RenderUi, UiBundle}, utils::{ application_root_dir, auto_fov::AutoFovSystem, tag::{Tag, TagFinder}, }, }; // Our own rendering plugins. use amethyst_particle::ParticleRender; use combat_render::flash::pass::FlashRender; use space_render::AtmosphereRender; use space_render::cosmos::{Cosmos, CosmosRender}; use space_render::StarRender; pub use action::Action; pub use add_to_limit::*; use crate::game; use crate::game::combat::process::Principal; use crate::game::ui::font::GameFonts; use crate::state::*; use rand::Rng; use crate::game::character::{CharacterStore, CharacterRole}; use crate::game::combat::ability::{AbilityList, AbilityUsability}; use crate::game::combat::ability::charge::ChargeAbility; use crate::game::combat::ability::spawn::SpawnAbility; use crate::game::combat::ability::hack::HackAbility; use crate::game::combat::ability::barrage::BarrageAbility; use crate::game::combat::ability::twin_shot::TwinShotAbility; use crate::game::combat::ability::focused_charge::FocusedChargeAbility; use crate::game::combat::ability::snipe::SnipeAbility; use crate::game::combat::ability::annihilate::AnnihilateAbility; use crate::game::map::CombatStore; use crate::game::combat::{CombatData, Wave}; use crate::game::CoreGameBundle; use crate::core::rebuild_pass::RebuildPlugin; pub mod action; pub mod activity; pub mod rebuild_pass; mod add_to_limit; /// Builds the 
application object for the game. /// This contains the built ECS data, which has all the required logic registered. pub fn build_application<'a, 'b, 'c, S: State<AggregateData<'a, 'b>, StateEvent> + 'c>(initial_state: S) -> Result<Application<'c, AggregateData<'a, 'b>>, Error> { // Get the application root directory for asset loading. let app_root = application_root_dir()?; // Add our meshes directory to the asset loader. let assets_dir = app_root.join("assets"); // Load display config let display_config_path = app_root.join("config\\display.ron"); let mut world: World = World::new(); let game_data = AggregateDataBuilder::new() // GAME SYSTEMS - these are only run when actually 'in' the game world, such as game logic and input systems. .with_combat( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::combat::CombatBundle)? .with_bundle(game::character::CharacterBundle)? ) .with_map( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::map::MapBundle)? ) // UI SYSTEMS - these should be ui specific and should not be required if not running with a UI. // TODO: implement UI .with_ui( GameDataBuilder::new() .with_system_desc( crate::game::control::camera::menu::MenuCameraSystemDesc::default(), "menu_camera", &[], ) ) // CORE SYSTEMS - these should be present at all times during the lifecycle of the game. // This allows us to load assets at any time, even when not in the game state. // FOV will update wen window is resized. //.with_core(AutoFovSystem::default(), "auto_fov", &[]) .with_core( GameDataBuilder::new() // Input bundle. .with_bundle(InputBundle::<StringBindings>::new())? // Automatic FOV and aspect ration modifications .with( AutoFovSystem::new(), "auto_fov", &[], ) // Register drone loader. // .with_system_desc( // PrefabLoaderSystemDesc::<game::drone::DronePrefabData>::default(), // "drone_loader", // &[], // ) // Register map loader. 
.with_system_desc( PrefabLoaderSystemDesc::<game::map::MapPrefabData>::default(), "map_loader", &[], ) // Register scene loader. .with_system_desc( PrefabLoaderSystemDesc::<game::map::WorldPrefabData>::default(), "scene_loader", &[], ) // Register character loader. .with_system_desc( PrefabLoaderSystemDesc::<game::character::CharacterPrefabData>::default(), "character_loader", &[], ) // 3D asset loading using the gltf format. .with_system_desc( GltfSceneLoaderSystemDesc::default(), "gltf_loader", &["map_loader", "scene_loader", "character_loader"], // This is important so that entity instantiation is performed in a single frame. ) // Animation system. // .with_bundle( // AnimationBundle::<usize, Transform>::new("animation_control", "sampler_interpolation") // .with_dep(&["gltf_loader"]), // )? // Basic transforms. .with_bundle(TransformBundle::new().with_dep(&[ //"animation_control", //"sampler_interpolation", ]))? // Vertex skinning (applying bones to vertices) and manipulating. // .with_bundle(VertexSkinningBundle::new().with_dep(&[ // "transform_system", // //"animation_control", // "sampler_interpolation", // ]))? // Amethyst core UI bundle - this requires other systems within the dispatcher to function properly. .with_bundle(UiBundle::<StringBindings>::new())? // Audio bundle .with_bundle(AudioBundle::default())? // RENDER SYSTEMS - these should be required only for rendering abstract data to the screen. // Add the render bundle. .with_bundle(CoreGameBundle)? .with_system_desc( HideHierarchySystemDesc::default(), "hide_hierarchy", &[], ) .with_bundle( RenderingBundle::<DefaultBackend>::new() // Clear color is black - for custom background colors (e.g. for ui) extra geometry will need to be rendered. 
.with_plugin(RenderToWindow::from_config_path(display_config_path)?.with_clear([0.0, 0.0, 0.0, 0.0])) .with_plugin(RenderPbr3D::default()) // Our own custom plugins .with_plugin(CosmosRender::new(Some(Cosmos::default()))) .with_plugin(FlashRender::new("textures/flash_billboard.png")) .with_plugin(StarRender::new("textures/star_glow.png")) .with_plugin(AtmosphereRender::default()) .with_plugin(ParticleRender::default()) .with_plugin(RebuildPlugin::default()) // Ui rendering. .with_plugin(RenderUi::default()) )? ); Ok(Application::new(assets_dir, initial_state, game_data)?) } pub fn get_root<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(&'a T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity) { Some((component, entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root( parents, components, parent_ent, ) } } pub fn get_root_cloned<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(T, Entity)> where T: Component + Clone, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity)
else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_cloned( parents, components, parent_ent, ) } } pub fn get_root_mut<'s, 'a, T, P, C>( parents: &P, components: &'a mut C, entity: Entity, ) -> Option<(&'a mut T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericWriteStorage<Component=T> { if components.get_mut(entity).is_some() { Some((components.get_mut(entity).unwrap(), entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_mut( parents, components, parent_ent, ) } } pub fn load_texture(world: &mut World, path: impl Into<String>) -> Handle<Texture> { if !world.has_value::<AssetStorage::<Texture>>() { world.insert(AssetStorage::<Texture>::new()); } let loader = world.read_resource::<Loader>(); loader.load( path, amethyst::renderer::formats::texture::ImageFormat::default(), (), &world.read_resource::<AssetStorage<Texture>>(), ) } /// Rolls a 'dice' based on the specified chance. pub fn roll(chance: f32) -> bool { let mut rng = rand::thread_rng(); let value: f32 = rng.gen_range(0.0, 1.0); if value < chance { true } else { false } } pub fn select_rng(values: &[f32]) -> Option<usize> { if values.is_empty() { return None; } let mut total: f32 = 0.0; for value in values { total += *value; } if total <= 0.0 { return None; } let mut rng = rand::thread_rng(); let random: f32 = rng.gen_range(0.0, total); let mut current: f32 = 0.0; for (i, value) in values.iter().enumerate() { if *value != 0.0 { let next: f32 = current + *value; if random >= current && random < next { return Some(i); } current = next; } } panic!("No value selected. The slice must contain non zero values..."); }
{ Some((component.clone(), entity)) }
conditional_block
mod.rs
use amethyst::{ animation::{AnimationBundle, VertexSkinningBundle}, assets::{ AssetStorage, Handle, Loader, PrefabLoader, PrefabLoaderSystemDesc, RonFormat, }, controls::{ControlTagPrefab, FlyControlBundle}, core::{ HideHierarchySystemDesc, Parent, transform::{ Transform, TransformBundle, }, }, ecs::{ prelude::*, storage::{ GenericReadStorage, GenericWriteStorage, }, }, audio::AudioBundle, Error, gltf::GltfSceneLoaderSystemDesc, input::{InputBundle, StringBindings}, prelude::*, renderer::{ plugins::{RenderPbr3D, RenderSkybox, RenderToWindow}, RenderingBundle, Texture, types::{DefaultBackend, Mesh, MeshData}, }, ui::{RenderUi, UiBundle}, utils::{ application_root_dir, auto_fov::AutoFovSystem, tag::{Tag, TagFinder}, }, }; // Our own rendering plugins. use amethyst_particle::ParticleRender; use combat_render::flash::pass::FlashRender; use space_render::AtmosphereRender; use space_render::cosmos::{Cosmos, CosmosRender}; use space_render::StarRender; pub use action::Action; pub use add_to_limit::*; use crate::game; use crate::game::combat::process::Principal; use crate::game::ui::font::GameFonts; use crate::state::*; use rand::Rng; use crate::game::character::{CharacterStore, CharacterRole}; use crate::game::combat::ability::{AbilityList, AbilityUsability}; use crate::game::combat::ability::charge::ChargeAbility; use crate::game::combat::ability::spawn::SpawnAbility; use crate::game::combat::ability::hack::HackAbility; use crate::game::combat::ability::barrage::BarrageAbility; use crate::game::combat::ability::twin_shot::TwinShotAbility; use crate::game::combat::ability::focused_charge::FocusedChargeAbility; use crate::game::combat::ability::snipe::SnipeAbility; use crate::game::combat::ability::annihilate::AnnihilateAbility; use crate::game::map::CombatStore; use crate::game::combat::{CombatData, Wave}; use crate::game::CoreGameBundle; use crate::core::rebuild_pass::RebuildPlugin; pub mod action; pub mod activity; pub mod rebuild_pass; mod add_to_limit; /// Builds the 
application object for the game. /// This contains the built ECS data, which has all the required logic registered. pub fn build_application<'a, 'b, 'c, S: State<AggregateData<'a, 'b>, StateEvent> + 'c>(initial_state: S) -> Result<Application<'c, AggregateData<'a, 'b>>, Error> { // Get the application root directory for asset loading. let app_root = application_root_dir()?; // Add our meshes directory to the asset loader. let assets_dir = app_root.join("assets"); // Load display config let display_config_path = app_root.join("config\\display.ron"); let mut world: World = World::new(); let game_data = AggregateDataBuilder::new() // GAME SYSTEMS - these are only run when actually 'in' the game world, such as game logic and input systems. .with_combat( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::combat::CombatBundle)? .with_bundle(game::character::CharacterBundle)? ) .with_map( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::map::MapBundle)? ) // UI SYSTEMS - these should be ui specific and should not be required if not running with a UI. // TODO: implement UI .with_ui( GameDataBuilder::new() .with_system_desc( crate::game::control::camera::menu::MenuCameraSystemDesc::default(), "menu_camera", &[], ) ) // CORE SYSTEMS - these should be present at all times during the lifecycle of the game. // This allows us to load assets at any time, even when not in the game state. // FOV will update wen window is resized. //.with_core(AutoFovSystem::default(), "auto_fov", &[]) .with_core( GameDataBuilder::new() // Input bundle. .with_bundle(InputBundle::<StringBindings>::new())? // Automatic FOV and aspect ration modifications .with( AutoFovSystem::new(), "auto_fov", &[], ) // Register drone loader. // .with_system_desc( // PrefabLoaderSystemDesc::<game::drone::DronePrefabData>::default(), // "drone_loader", // &[], // ) // Register map loader. 
.with_system_desc( PrefabLoaderSystemDesc::<game::map::MapPrefabData>::default(), "map_loader", &[], ) // Register scene loader. .with_system_desc( PrefabLoaderSystemDesc::<game::map::WorldPrefabData>::default(), "scene_loader", &[], ) // Register character loader. .with_system_desc( PrefabLoaderSystemDesc::<game::character::CharacterPrefabData>::default(), "character_loader", &[], ) // 3D asset loading using the gltf format. .with_system_desc( GltfSceneLoaderSystemDesc::default(), "gltf_loader", &["map_loader", "scene_loader", "character_loader"], // This is important so that entity instantiation is performed in a single frame. ) // Animation system. // .with_bundle( // AnimationBundle::<usize, Transform>::new("animation_control", "sampler_interpolation") // .with_dep(&["gltf_loader"]), // )? // Basic transforms. .with_bundle(TransformBundle::new().with_dep(&[ //"animation_control", //"sampler_interpolation", ]))? // Vertex skinning (applying bones to vertices) and manipulating. // .with_bundle(VertexSkinningBundle::new().with_dep(&[ // "transform_system", // //"animation_control", // "sampler_interpolation", // ]))? // Amethyst core UI bundle - this requires other systems within the dispatcher to function properly. .with_bundle(UiBundle::<StringBindings>::new())? // Audio bundle .with_bundle(AudioBundle::default())? // RENDER SYSTEMS - these should be required only for rendering abstract data to the screen. // Add the render bundle. .with_bundle(CoreGameBundle)? .with_system_desc( HideHierarchySystemDesc::default(), "hide_hierarchy", &[], ) .with_bundle( RenderingBundle::<DefaultBackend>::new() // Clear color is black - for custom background colors (e.g. for ui) extra geometry will need to be rendered. 
.with_plugin(RenderToWindow::from_config_path(display_config_path)?.with_clear([0.0, 0.0, 0.0, 0.0])) .with_plugin(RenderPbr3D::default()) // Our own custom plugins .with_plugin(CosmosRender::new(Some(Cosmos::default()))) .with_plugin(FlashRender::new("textures/flash_billboard.png")) .with_plugin(StarRender::new("textures/star_glow.png")) .with_plugin(AtmosphereRender::default()) .with_plugin(ParticleRender::default()) .with_plugin(RebuildPlugin::default()) // Ui rendering. .with_plugin(RenderUi::default()) )? ); Ok(Application::new(assets_dir, initial_state, game_data)?) } pub fn get_root<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(&'a T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity) { Some((component, entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root( parents, components, parent_ent, ) } } pub fn get_root_cloned<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(T, Entity)> where T: Component + Clone, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T>
pub fn get_root_mut<'s, 'a, T, P, C>( parents: &P, components: &'a mut C, entity: Entity, ) -> Option<(&'a mut T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericWriteStorage<Component=T> { if components.get_mut(entity).is_some() { Some((components.get_mut(entity).unwrap(), entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_mut( parents, components, parent_ent, ) } } pub fn load_texture(world: &mut World, path: impl Into<String>) -> Handle<Texture> { if !world.has_value::<AssetStorage::<Texture>>() { world.insert(AssetStorage::<Texture>::new()); } let loader = world.read_resource::<Loader>(); loader.load( path, amethyst::renderer::formats::texture::ImageFormat::default(), (), &world.read_resource::<AssetStorage<Texture>>(), ) } /// Rolls a 'dice' based on the specified chance. pub fn roll(chance: f32) -> bool { let mut rng = rand::thread_rng(); let value: f32 = rng.gen_range(0.0, 1.0); if value < chance { true } else { false } } pub fn select_rng(values: &[f32]) -> Option<usize> { if values.is_empty() { return None; } let mut total: f32 = 0.0; for value in values { total += *value; } if total <= 0.0 { return None; } let mut rng = rand::thread_rng(); let random: f32 = rng.gen_range(0.0, total); let mut current: f32 = 0.0; for (i, value) in values.iter().enumerate() { if *value != 0.0 { let next: f32 = current + *value; if random >= current && random < next { return Some(i); } current = next; } } panic!("No value selected. The slice must contain non zero values..."); }
{ if let Some(component) = components.get(entity) { Some((component.clone(), entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_cloned( parents, components, parent_ent, ) } }
identifier_body
mod.rs
use amethyst::{ animation::{AnimationBundle, VertexSkinningBundle}, assets::{ AssetStorage, Handle, Loader, PrefabLoader, PrefabLoaderSystemDesc, RonFormat, }, controls::{ControlTagPrefab, FlyControlBundle}, core::{ HideHierarchySystemDesc, Parent, transform::{ Transform, TransformBundle, }, }, ecs::{ prelude::*, storage::{ GenericReadStorage, GenericWriteStorage, }, }, audio::AudioBundle, Error, gltf::GltfSceneLoaderSystemDesc, input::{InputBundle, StringBindings}, prelude::*, renderer::{ plugins::{RenderPbr3D, RenderSkybox, RenderToWindow}, RenderingBundle, Texture, types::{DefaultBackend, Mesh, MeshData}, }, ui::{RenderUi, UiBundle}, utils::{ application_root_dir, auto_fov::AutoFovSystem, tag::{Tag, TagFinder}, }, }; // Our own rendering plugins. use amethyst_particle::ParticleRender; use combat_render::flash::pass::FlashRender; use space_render::AtmosphereRender; use space_render::cosmos::{Cosmos, CosmosRender}; use space_render::StarRender; pub use action::Action; pub use add_to_limit::*; use crate::game; use crate::game::combat::process::Principal; use crate::game::ui::font::GameFonts; use crate::state::*; use rand::Rng; use crate::game::character::{CharacterStore, CharacterRole}; use crate::game::combat::ability::{AbilityList, AbilityUsability}; use crate::game::combat::ability::charge::ChargeAbility; use crate::game::combat::ability::spawn::SpawnAbility; use crate::game::combat::ability::hack::HackAbility; use crate::game::combat::ability::barrage::BarrageAbility; use crate::game::combat::ability::twin_shot::TwinShotAbility; use crate::game::combat::ability::focused_charge::FocusedChargeAbility; use crate::game::combat::ability::snipe::SnipeAbility; use crate::game::combat::ability::annihilate::AnnihilateAbility; use crate::game::map::CombatStore; use crate::game::combat::{CombatData, Wave}; use crate::game::CoreGameBundle; use crate::core::rebuild_pass::RebuildPlugin; pub mod action; pub mod activity; pub mod rebuild_pass; mod add_to_limit; /// Builds the 
application object for the game. /// This contains the built ECS data, which has all the required logic registered. pub fn build_application<'a, 'b, 'c, S: State<AggregateData<'a, 'b>, StateEvent> + 'c>(initial_state: S) -> Result<Application<'c, AggregateData<'a, 'b>>, Error> { // Get the application root directory for asset loading. let app_root = application_root_dir()?; // Add our meshes directory to the asset loader. let assets_dir = app_root.join("assets"); // Load display config let display_config_path = app_root.join("config\\display.ron"); let mut world: World = World::new(); let game_data = AggregateDataBuilder::new() // GAME SYSTEMS - these are only run when actually 'in' the game world, such as game logic and input systems. .with_combat( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::combat::CombatBundle)? .with_bundle(game::character::CharacterBundle)? ) .with_map( GameDataBuilder::new() // Our custom core game bundle data. .with_bundle(game::map::MapBundle)? ) // UI SYSTEMS - these should be ui specific and should not be required if not running with a UI. // TODO: implement UI .with_ui( GameDataBuilder::new() .with_system_desc( crate::game::control::camera::menu::MenuCameraSystemDesc::default(), "menu_camera", &[], ) ) // CORE SYSTEMS - these should be present at all times during the lifecycle of the game. // This allows us to load assets at any time, even when not in the game state. // FOV will update wen window is resized. //.with_core(AutoFovSystem::default(), "auto_fov", &[]) .with_core( GameDataBuilder::new() // Input bundle. .with_bundle(InputBundle::<StringBindings>::new())? // Automatic FOV and aspect ration modifications .with( AutoFovSystem::new(), "auto_fov", &[], ) // Register drone loader. // .with_system_desc( // PrefabLoaderSystemDesc::<game::drone::DronePrefabData>::default(), // "drone_loader", // &[], // ) // Register map loader. 
.with_system_desc( PrefabLoaderSystemDesc::<game::map::MapPrefabData>::default(), "map_loader", &[], ) // Register scene loader. .with_system_desc( PrefabLoaderSystemDesc::<game::map::WorldPrefabData>::default(), "scene_loader", &[], ) // Register character loader. .with_system_desc( PrefabLoaderSystemDesc::<game::character::CharacterPrefabData>::default(), "character_loader", &[], ) // 3D asset loading using the gltf format. .with_system_desc( GltfSceneLoaderSystemDesc::default(), "gltf_loader", &["map_loader", "scene_loader", "character_loader"], // This is important so that entity instantiation is performed in a single frame. ) // Animation system. // .with_bundle( // AnimationBundle::<usize, Transform>::new("animation_control", "sampler_interpolation") // .with_dep(&["gltf_loader"]), // )? // Basic transforms. .with_bundle(TransformBundle::new().with_dep(&[ //"animation_control", //"sampler_interpolation", ]))? // Vertex skinning (applying bones to vertices) and manipulating. // .with_bundle(VertexSkinningBundle::new().with_dep(&[ // "transform_system", // //"animation_control", // "sampler_interpolation", // ]))? // Amethyst core UI bundle - this requires other systems within the dispatcher to function properly. .with_bundle(UiBundle::<StringBindings>::new())? // Audio bundle .with_bundle(AudioBundle::default())? // RENDER SYSTEMS - these should be required only for rendering abstract data to the screen. // Add the render bundle. .with_bundle(CoreGameBundle)? .with_system_desc( HideHierarchySystemDesc::default(), "hide_hierarchy", &[], ) .with_bundle( RenderingBundle::<DefaultBackend>::new() // Clear color is black - for custom background colors (e.g. for ui) extra geometry will need to be rendered. 
.with_plugin(RenderToWindow::from_config_path(display_config_path)?.with_clear([0.0, 0.0, 0.0, 0.0])) .with_plugin(RenderPbr3D::default()) // Our own custom plugins .with_plugin(CosmosRender::new(Some(Cosmos::default()))) .with_plugin(FlashRender::new("textures/flash_billboard.png")) .with_plugin(StarRender::new("textures/star_glow.png")) .with_plugin(AtmosphereRender::default()) .with_plugin(ParticleRender::default()) .with_plugin(RebuildPlugin::default()) // Ui rendering. .with_plugin(RenderUi::default()) )? ); Ok(Application::new(assets_dir, initial_state, game_data)?) } pub fn get_root<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(&'a T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity) { Some((component, entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root( parents, components, parent_ent, ) } } pub fn
<'s, 'a, T, P, C>( parents: &P, components: &'a C, entity: Entity, ) -> Option<(T, Entity)> where T: Component + Clone, P: GenericReadStorage<Component=Parent>, C: GenericReadStorage<Component=T> { if let Some(component) = components.get(entity) { Some((component.clone(), entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_cloned( parents, components, parent_ent, ) } } pub fn get_root_mut<'s, 'a, T, P, C>( parents: &P, components: &'a mut C, entity: Entity, ) -> Option<(&'a mut T, Entity)> where T: Component, P: GenericReadStorage<Component=Parent>, C: GenericWriteStorage<Component=T> { if components.get_mut(entity).is_some() { Some((components.get_mut(entity).unwrap(), entity)) } else { let parent_ent: Entity = parents.get(entity)?.entity; get_root_mut( parents, components, parent_ent, ) } } pub fn load_texture(world: &mut World, path: impl Into<String>) -> Handle<Texture> { if !world.has_value::<AssetStorage::<Texture>>() { world.insert(AssetStorage::<Texture>::new()); } let loader = world.read_resource::<Loader>(); loader.load( path, amethyst::renderer::formats::texture::ImageFormat::default(), (), &world.read_resource::<AssetStorage<Texture>>(), ) } /// Rolls a 'dice' based on the specified chance. pub fn roll(chance: f32) -> bool { let mut rng = rand::thread_rng(); let value: f32 = rng.gen_range(0.0, 1.0); if value < chance { true } else { false } } pub fn select_rng(values: &[f32]) -> Option<usize> { if values.is_empty() { return None; } let mut total: f32 = 0.0; for value in values { total += *value; } if total <= 0.0 { return None; } let mut rng = rand::thread_rng(); let random: f32 = rng.gen_range(0.0, total); let mut current: f32 = 0.0; for (i, value) in values.iter().enumerate() { if *value != 0.0 { let next: f32 = current + *value; if random >= current && random < next { return Some(i); } current = next; } } panic!("No value selected. The slice must contain non zero values..."); }
get_root_cloned
identifier_name
term_evaluation_result.js
/** * Created by Administrator on 2018/5/25. */ define(['jquery', C.CLF('avalon.js'), 'layer', C.Co('reconsider_manage/examine_review', 'term_evaluation_result/term_evaluation_result', 'html!'), C.Co('reconsider_manage/examine_review', 'term_evaluation_result/term_evaluation_result', 'css!'), C.CMF("data_center.js"), C.CM("three_menu_module"), C.CM("select_assembly"), C.CM("tuploader") ], function ($, avalon, layer, html, css, data_center, three_menu_module, select_assembly, tuploader) { var avalon_define = function () { var grade_list = []; var semester_full = []; var uploader = null; function request_after(a, b, c, is_suc, msg) { if (!is_suc) { toastr.error(msg) }else{ vm.init(); } } function yy_treat(form)
var vm = avalon.define({ $id: "term_evaluation_result", //图片是否展开(注:如果数据是循环出来,不能用这种方式) is_open: false, // 接口中未返回区县信息, 暂时使用用户所在区县 district: "", //核查意见 opinion: '', //更正结果 correct:"", // 图片显示相关支持 user_photo: cloud.user_photo, url_img: url_img, //学期 semester_name:"", //下拉列表是否初始化 is_init_sel: true, head_value: {grade: "请选择年级", class: "请选择班级", semester: "请选择学期", project: "请选择项目"}, project_list: [], filter: {code: "", name: ""}, filter_show: function (el) { if (this.filter.name == "" && el.stu_num.indexOf(this.filter.code) >= 0) return true; else if (this.filter.code == "" && el.stu_name.indexOf(this.filter.name) >= 0) return true; else if (this.filter.name == "" && this.filter.code == "") return true; else if (el.stu_num.indexOf(this.filter.code) && el.stu_name.indexOf(this.filter.name)) return true; return false; }, form_list: { city:"", classId:"", district:"", gradeId:"", offset:0, rows:9999999, schoolId:"", semesterId:"", state:3, studentName:"", studentNum:"", /*=================================*/ subjectId: "" }, headers:[], list: [], change_project:function (value, index) { this.form_list.subjectId = value.value; // 查询项目表头 cloud.pj_headers({subjectId:value.value}, function (url,args,data) { vm.headers = data; }); data_center.scope("term_eva_opt_project", function (p) { p.head_value = value.name; vm.semester_name = value.name; }); }, change_grade: function (value, index) { this.form_list.gradeId = Number(value.value); var ori_class = grade_list[index].class_list; // 获取班级列表 var sel_class_ls = any_2_select(ori_class, {name: "class_name", value: ["class_id"]}) this.class_list = sel_class_ls; // 修改对应显示信息 data_center.scope("term_eva_opt_grade", function (p) { p.head_value = value.name; }); // 查看该年级的评价报告项目列表 var full_project_list = cloud.get_semeter_pj_project({ca_gradeid:this.form_list.gradeId, state:5, ca_workid:vm.form_list.schoolId}); vm.project_list = any_2_select(full_project_list, {name:"ca_name", value:["id"]}); if(vm.project_list.length == 0){ 
toastr.warning('未发现评价项目'); return; } vm.change_project(vm.project_list[0], 0); this.change_class(this.class_list[0], 0); }, change_class: function (value, index) { data_center.scope("term_eva_opt_class", function (p) { p.head_value = value.name; }); // this.form_list.class_id = value.value; // 获取有异议的列表 this.form_list.classId = value.value; this.list=[] // 获取审核列表 cloud.xq_sh_list(this.form_list.$model, function (url, arg, data) { var count = 0 // 针对列表获取对应的异议 if(!data.list||data.list.length==0) return; data.list.forEach(function (value, index) { var info = cloud.user_info({guid: value.studentId}); value = $.extend(value, info); value.percentileOne = value.percentileOne.split(","); cloud.fy_jdbg({subjectId: value.subjectId, nameId2: value.studentId}, function (url, arg, ret) { value.dissent = ret; vm.list.push(value) }); }) }); }, grade_list: [], class_list: [], current_type: "0", //弹出框,选择的文件名 file_name: '请选择文件', type_arr: [ {value: "0", "name": "全部类型"}, {value: "1", "name": "品德发展"}, {value: "2", "name": "艺术素养"}, {value: "3", "name": "社会实践"}, {value: "4", "name": "学业水平"}, {value: "5", "name": "身体健康"}, {value: "6", "name": "成就奖励"}, {value: "7", "name": "日常表现"}, ], init: function () { cloud.semester_current({}, function (url, ars, data) { vm.form_list.semesterId = data.id; setTimeout(function (args) { vm.district = cloud.user_district(); vm.form_list.city = cloud.user_city(); vm.form_list.district = vm.district; vm.form_list.schoolId = cloud.user_depart_id(); // -> 不同的身份,获取的班级,年级列表不一样 grade_list = cloud.auto_grade_list({}); vm.grade_list = any_2_select(grade_list, {name: "grade_name", value: ["grade_id"]}); vm.change_grade(vm.grade_list[0], 0); }, 0); }); }, // change_type: function (value, index) { this.current_type = value.value; }, //跳转页面 change_page: function (page) { window.location = "#" + page; }, //展开或收起图片(注:如果数据是循环出来,不能用这种方式) open_close: function (w) { if (w == 1) { this.is_open = true; } else { this.is_open = false; } }, audited_one:{}, //异议无效 disagree: 
function (el) { //缺附件 缺更正结果 var form = { auditOpinion:"0", auditResult:"", correct:"", material:"", name:D("user.user.name"), nameId:D("user.user.guid"), remark:"处理的意见记录字符串", stuId:el.studentId, // stuName:el.studentName, subjectId:el.subjectId } layer.confirm('确定异议无效吗?', { btn: ['确定', '取消'] //按钮 }, function () { vm.audited_one = el; yy_treat(form); layer.closeAll(); }, function () { }); }, //异议成立 establish: function (el) { //选的文件初始化 $('#file').change(function (e) { self.file_name = e.currentTarget.files[0].name; }); uploader.cb = function (up, data, status) { var status = data[0].status; if (status == "success") { var form = { auditOpinion:"1", auditResult:vm.opinion, correct:vm.correct, material:"", name:D("user.user.name"), nameId:D("user.user.guid"), remark:"处理的意见记录字符串", stuId:el.studentId, // stuName:el.studentName, subjectId:el.subjectId } form.material=JSON.stringify(data[0]) tuploader.clear(uploader); vm.audited_one = el; yy_treat(form); vm.opinion = ""; } else { toastr.error("附件上传失败") } }; layer.open({ title: '核查意见', type: 1, area: ['700px', '450px'], content: $('#v_layer'), btn: ['确定', '取消'], yes: function (index, layero) { if (uploader.files.length != 0) { uploader.start(); layer.closeAll(); } else { toastr.warning("请设置照片"); } }, cancel: function () { //右上角关闭回调 } }); }, on_request_complete: function (cmd, status, data, is_suc, msg) { /**/ if (is_suc) { switch (cmd) { default: break; } } else { toastr.error(msg); } } }); vm.$watch("onReady", function () { var token = sessionStorage.getItem("token"); uploader = tuploader.init("file", token, undefined, false); vm.init(); }); return vm; } return { view: html, define: avalon_define } });
{ var is_school_user = cloud.is_school_user(); var distict_id = ''; if (is_school_user) { distict_id = cloud.school_user_distict_id().district_id; } var user = cloud.user_user(); form.district_id = distict_id; form.district = user.district; form.fk_school_id = user.fk_school_id; form.school_name = user.school_name; form.account = user.account; form.name = user.name; form.fk_grade_id = vm.audited_one.fk_grade_id; form.grade_name = vm.audited_one.grade_name; form.fk_class_id = vm.audited_one.fk_class_id; form.class_name = vm.audited_one.class_name; form.student_num = vm.audited_one.code; form.student_name = vm.audited_one.name; cloud.add_audit(form, request_after); }
identifier_body
term_evaluation_result.js
/** * Created by Administrator on 2018/5/25. */ define(['jquery', C.CLF('avalon.js'), 'layer', C.Co('reconsider_manage/examine_review', 'term_evaluation_result/term_evaluation_result', 'html!'), C.Co('reconsider_manage/examine_review', 'term_evaluation_result/term_evaluation_result', 'css!'), C.CMF("data_center.js"), C.CM("three_menu_module"), C.CM("select_assembly"), C.CM("tuploader") ], function ($, avalon, layer, html, css, data_center, three_menu_module, select_assembly, tuploader) { var avalon_define = function () { var grade_list = []; var semester_full = []; var uploader = null; function request_after(a, b, c, is_suc, msg) { if (!is_suc) { toastr.error(msg) }else{ vm.init(); } } function yy_treat(form) { var is_school_user = cloud.is_school_user(); var distict_id = ''; if (is_school_user) { distict_id = cloud.school_user_distict_id().district_id; } var user = cloud.user_user(); form.district_id = distict_id; form.district = user.district; form.fk_school_id = user.fk_school_id; form.school_name = user.school_name; form.account = user.account; form.name = user.name; form.fk_grade_id = vm.audited_one.fk_grade_id; form.grade_name = vm.audited_one.grade_name; form.fk_class_id = vm.audited_one.fk_class_id; form.class_name = vm.audited_one.class_name; form.student_num = vm.audited_one.code; form.student_name = vm.audited_one.name; cloud.add_audit(form, request_after); }
//图片是否展开(注:如果数据是循环出来,不能用这种方式) is_open: false, // 接口中未返回区县信息, 暂时使用用户所在区县 district: "", //核查意见 opinion: '', //更正结果 correct:"", // 图片显示相关支持 user_photo: cloud.user_photo, url_img: url_img, //学期 semester_name:"", //下拉列表是否初始化 is_init_sel: true, head_value: {grade: "请选择年级", class: "请选择班级", semester: "请选择学期", project: "请选择项目"}, project_list: [], filter: {code: "", name: ""}, filter_show: function (el) { if (this.filter.name == "" && el.stu_num.indexOf(this.filter.code) >= 0) return true; else if (this.filter.code == "" && el.stu_name.indexOf(this.filter.name) >= 0) return true; else if (this.filter.name == "" && this.filter.code == "") return true; else if (el.stu_num.indexOf(this.filter.code) && el.stu_name.indexOf(this.filter.name)) return true; return false; }, form_list: { city:"", classId:"", district:"", gradeId:"", offset:0, rows:9999999, schoolId:"", semesterId:"", state:3, studentName:"", studentNum:"", /*=================================*/ subjectId: "" }, headers:[], list: [], change_project:function (value, index) { this.form_list.subjectId = value.value; // 查询项目表头 cloud.pj_headers({subjectId:value.value}, function (url,args,data) { vm.headers = data; }); data_center.scope("term_eva_opt_project", function (p) { p.head_value = value.name; vm.semester_name = value.name; }); }, change_grade: function (value, index) { this.form_list.gradeId = Number(value.value); var ori_class = grade_list[index].class_list; // 获取班级列表 var sel_class_ls = any_2_select(ori_class, {name: "class_name", value: ["class_id"]}) this.class_list = sel_class_ls; // 修改对应显示信息 data_center.scope("term_eva_opt_grade", function (p) { p.head_value = value.name; }); // 查看该年级的评价报告项目列表 var full_project_list = cloud.get_semeter_pj_project({ca_gradeid:this.form_list.gradeId, state:5, ca_workid:vm.form_list.schoolId}); vm.project_list = any_2_select(full_project_list, {name:"ca_name", value:["id"]}); if(vm.project_list.length == 0){ toastr.warning('未发现评价项目'); return; } vm.change_project(vm.project_list[0], 
0); this.change_class(this.class_list[0], 0); }, change_class: function (value, index) { data_center.scope("term_eva_opt_class", function (p) { p.head_value = value.name; }); // this.form_list.class_id = value.value; // 获取有异议的列表 this.form_list.classId = value.value; this.list=[] // 获取审核列表 cloud.xq_sh_list(this.form_list.$model, function (url, arg, data) { var count = 0 // 针对列表获取对应的异议 if(!data.list||data.list.length==0) return; data.list.forEach(function (value, index) { var info = cloud.user_info({guid: value.studentId}); value = $.extend(value, info); value.percentileOne = value.percentileOne.split(","); cloud.fy_jdbg({subjectId: value.subjectId, nameId2: value.studentId}, function (url, arg, ret) { value.dissent = ret; vm.list.push(value) }); }) }); }, grade_list: [], class_list: [], current_type: "0", //弹出框,选择的文件名 file_name: '请选择文件', type_arr: [ {value: "0", "name": "全部类型"}, {value: "1", "name": "品德发展"}, {value: "2", "name": "艺术素养"}, {value: "3", "name": "社会实践"}, {value: "4", "name": "学业水平"}, {value: "5", "name": "身体健康"}, {value: "6", "name": "成就奖励"}, {value: "7", "name": "日常表现"}, ], init: function () { cloud.semester_current({}, function (url, ars, data) { vm.form_list.semesterId = data.id; setTimeout(function (args) { vm.district = cloud.user_district(); vm.form_list.city = cloud.user_city(); vm.form_list.district = vm.district; vm.form_list.schoolId = cloud.user_depart_id(); // -> 不同的身份,获取的班级,年级列表不一样 grade_list = cloud.auto_grade_list({}); vm.grade_list = any_2_select(grade_list, {name: "grade_name", value: ["grade_id"]}); vm.change_grade(vm.grade_list[0], 0); }, 0); }); }, // change_type: function (value, index) { this.current_type = value.value; }, //跳转页面 change_page: function (page) { window.location = "#" + page; }, //展开或收起图片(注:如果数据是循环出来,不能用这种方式) open_close: function (w) { if (w == 1) { this.is_open = true; } else { this.is_open = false; } }, audited_one:{}, //异议无效 disagree: function (el) { //缺附件 缺更正结果 var form = { auditOpinion:"0", auditResult:"", 
correct:"", material:"", name:D("user.user.name"), nameId:D("user.user.guid"), remark:"处理的意见记录字符串", stuId:el.studentId, // stuName:el.studentName, subjectId:el.subjectId } layer.confirm('确定异议无效吗?', { btn: ['确定', '取消'] //按钮 }, function () { vm.audited_one = el; yy_treat(form); layer.closeAll(); }, function () { }); }, //异议成立 establish: function (el) { //选的文件初始化 $('#file').change(function (e) { self.file_name = e.currentTarget.files[0].name; }); uploader.cb = function (up, data, status) { var status = data[0].status; if (status == "success") { var form = { auditOpinion:"1", auditResult:vm.opinion, correct:vm.correct, material:"", name:D("user.user.name"), nameId:D("user.user.guid"), remark:"处理的意见记录字符串", stuId:el.studentId, // stuName:el.studentName, subjectId:el.subjectId } form.material=JSON.stringify(data[0]) tuploader.clear(uploader); vm.audited_one = el; yy_treat(form); vm.opinion = ""; } else { toastr.error("附件上传失败") } }; layer.open({ title: '核查意见', type: 1, area: ['700px', '450px'], content: $('#v_layer'), btn: ['确定', '取消'], yes: function (index, layero) { if (uploader.files.length != 0) { uploader.start(); layer.closeAll(); } else { toastr.warning("请设置照片"); } }, cancel: function () { //右上角关闭回调 } }); }, on_request_complete: function (cmd, status, data, is_suc, msg) { /**/ if (is_suc) { switch (cmd) { default: break; } } else { toastr.error(msg); } } }); vm.$watch("onReady", function () { var token = sessionStorage.getItem("token"); uploader = tuploader.init("file", token, undefined, false); vm.init(); }); return vm; } return { view: html, define: avalon_define } });
var vm = avalon.define({ $id: "term_evaluation_result",
random_line_split
term_evaluation_result.js
/** * Created by Administrator on 2018/5/25. */ define(['jquery', C.CLF('avalon.js'), 'layer', C.Co('reconsider_manage/examine_review', 'term_evaluation_result/term_evaluation_result', 'html!'), C.Co('reconsider_manage/examine_review', 'term_evaluation_result/term_evaluation_result', 'css!'), C.CMF("data_center.js"), C.CM("three_menu_module"), C.CM("select_assembly"), C.CM("tuploader") ], function ($, avalon, layer, html, css, data_center, three_menu_module, select_assembly, tuploader) { var avalon_define = function () { var grade_list = []; var semester_full = []; var uploader = null; function request_after(a, b, c, is_suc, msg) { if (!is_suc) { toastr.error(msg) }else{ vm.init(); } } function
(form) { var is_school_user = cloud.is_school_user(); var distict_id = ''; if (is_school_user) { distict_id = cloud.school_user_distict_id().district_id; } var user = cloud.user_user(); form.district_id = distict_id; form.district = user.district; form.fk_school_id = user.fk_school_id; form.school_name = user.school_name; form.account = user.account; form.name = user.name; form.fk_grade_id = vm.audited_one.fk_grade_id; form.grade_name = vm.audited_one.grade_name; form.fk_class_id = vm.audited_one.fk_class_id; form.class_name = vm.audited_one.class_name; form.student_num = vm.audited_one.code; form.student_name = vm.audited_one.name; cloud.add_audit(form, request_after); } var vm = avalon.define({ $id: "term_evaluation_result", //图片是否展开(注:如果数据是循环出来,不能用这种方式) is_open: false, // 接口中未返回区县信息, 暂时使用用户所在区县 district: "", //核查意见 opinion: '', //更正结果 correct:"", // 图片显示相关支持 user_photo: cloud.user_photo, url_img: url_img, //学期 semester_name:"", //下拉列表是否初始化 is_init_sel: true, head_value: {grade: "请选择年级", class: "请选择班级", semester: "请选择学期", project: "请选择项目"}, project_list: [], filter: {code: "", name: ""}, filter_show: function (el) { if (this.filter.name == "" && el.stu_num.indexOf(this.filter.code) >= 0) return true; else if (this.filter.code == "" && el.stu_name.indexOf(this.filter.name) >= 0) return true; else if (this.filter.name == "" && this.filter.code == "") return true; else if (el.stu_num.indexOf(this.filter.code) && el.stu_name.indexOf(this.filter.name)) return true; return false; }, form_list: { city:"", classId:"", district:"", gradeId:"", offset:0, rows:9999999, schoolId:"", semesterId:"", state:3, studentName:"", studentNum:"", /*=================================*/ subjectId: "" }, headers:[], list: [], change_project:function (value, index) { this.form_list.subjectId = value.value; // 查询项目表头 cloud.pj_headers({subjectId:value.value}, function (url,args,data) { vm.headers = data; }); data_center.scope("term_eva_opt_project", function (p) { p.head_value = value.name; 
vm.semester_name = value.name; }); }, change_grade: function (value, index) { this.form_list.gradeId = Number(value.value); var ori_class = grade_list[index].class_list; // 获取班级列表 var sel_class_ls = any_2_select(ori_class, {name: "class_name", value: ["class_id"]}) this.class_list = sel_class_ls; // 修改对应显示信息 data_center.scope("term_eva_opt_grade", function (p) { p.head_value = value.name; }); // 查看该年级的评价报告项目列表 var full_project_list = cloud.get_semeter_pj_project({ca_gradeid:this.form_list.gradeId, state:5, ca_workid:vm.form_list.schoolId}); vm.project_list = any_2_select(full_project_list, {name:"ca_name", value:["id"]}); if(vm.project_list.length == 0){ toastr.warning('未发现评价项目'); return; } vm.change_project(vm.project_list[0], 0); this.change_class(this.class_list[0], 0); }, change_class: function (value, index) { data_center.scope("term_eva_opt_class", function (p) { p.head_value = value.name; }); // this.form_list.class_id = value.value; // 获取有异议的列表 this.form_list.classId = value.value; this.list=[] // 获取审核列表 cloud.xq_sh_list(this.form_list.$model, function (url, arg, data) { var count = 0 // 针对列表获取对应的异议 if(!data.list||data.list.length==0) return; data.list.forEach(function (value, index) { var info = cloud.user_info({guid: value.studentId}); value = $.extend(value, info); value.percentileOne = value.percentileOne.split(","); cloud.fy_jdbg({subjectId: value.subjectId, nameId2: value.studentId}, function (url, arg, ret) { value.dissent = ret; vm.list.push(value) }); }) }); }, grade_list: [], class_list: [], current_type: "0", //弹出框,选择的文件名 file_name: '请选择文件', type_arr: [ {value: "0", "name": "全部类型"}, {value: "1", "name": "品德发展"}, {value: "2", "name": "艺术素养"}, {value: "3", "name": "社会实践"}, {value: "4", "name": "学业水平"}, {value: "5", "name": "身体健康"}, {value: "6", "name": "成就奖励"}, {value: "7", "name": "日常表现"}, ], init: function () { cloud.semester_current({}, function (url, ars, data) { vm.form_list.semesterId = data.id; setTimeout(function (args) { vm.district = 
cloud.user_district(); vm.form_list.city = cloud.user_city(); vm.form_list.district = vm.district; vm.form_list.schoolId = cloud.user_depart_id(); // -> 不同的身份,获取的班级,年级列表不一样 grade_list = cloud.auto_grade_list({}); vm.grade_list = any_2_select(grade_list, {name: "grade_name", value: ["grade_id"]}); vm.change_grade(vm.grade_list[0], 0); }, 0); }); }, // change_type: function (value, index) { this.current_type = value.value; }, //跳转页面 change_page: function (page) { window.location = "#" + page; }, //展开或收起图片(注:如果数据是循环出来,不能用这种方式) open_close: function (w) { if (w == 1) { this.is_open = true; } else { this.is_open = false; } }, audited_one:{}, //异议无效 disagree: function (el) { //缺附件 缺更正结果 var form = { auditOpinion:"0", auditResult:"", correct:"", material:"", name:D("user.user.name"), nameId:D("user.user.guid"), remark:"处理的意见记录字符串", stuId:el.studentId, // stuName:el.studentName, subjectId:el.subjectId } layer.confirm('确定异议无效吗?', { btn: ['确定', '取消'] //按钮 }, function () { vm.audited_one = el; yy_treat(form); layer.closeAll(); }, function () { }); }, //异议成立 establish: function (el) { //选的文件初始化 $('#file').change(function (e) { self.file_name = e.currentTarget.files[0].name; }); uploader.cb = function (up, data, status) { var status = data[0].status; if (status == "success") { var form = { auditOpinion:"1", auditResult:vm.opinion, correct:vm.correct, material:"", name:D("user.user.name"), nameId:D("user.user.guid"), remark:"处理的意见记录字符串", stuId:el.studentId, // stuName:el.studentName, subjectId:el.subjectId } form.material=JSON.stringify(data[0]) tuploader.clear(uploader); vm.audited_one = el; yy_treat(form); vm.opinion = ""; } else { toastr.error("附件上传失败") } }; layer.open({ title: '核查意见', type: 1, area: ['700px', '450px'], content: $('#v_layer'), btn: ['确定', '取消'], yes: function (index, layero) { if (uploader.files.length != 0) { uploader.start(); layer.closeAll(); } else { toastr.warning("请设置照片"); } }, cancel: function () { //右上角关闭回调 } }); }, on_request_complete: function (cmd, 
status, data, is_suc, msg) { /**/ if (is_suc) { switch (cmd) { default: break; } } else { toastr.error(msg); } } }); vm.$watch("onReady", function () { var token = sessionStorage.getItem("token"); uploader = tuploader.init("file", token, undefined, false); vm.init(); }); return vm; } return { view: html, define: avalon_define } });
yy_treat
identifier_name
MatrixFromWeights_py27.py
import numpy as np from numpy.linalg import eig import cmath import math import sys from functools import reduce #import pandas as pd #import pylab as pl import array #from scipy.fftpack import fft, ifft from scipy.optimize import nnls class Rotation: """ * Rotation : provides a representation for 3D space rotations * using euler angles (ZX'Z'' convention) or rotation matrices """ def _euler2mat_z1x2z3(self, z1=0, x2=0, z3=0): cosz1 = math.cos(z1) sinz1 = math.sin(z1) Z1 = np.array( [[cosz1, -sinz1, 0], [sinz1, cosz1, 0], [0, 0, 1]]) cosx = math.cos(x2) sinx = math.sin(x2) X2 = np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) cosz3 = math.cos(z3) sinz3 = math.sin(z3) Z3 = np.array( [[cosz3, -sinz3, 0], [sinz3, cosz3, 0], [0, 0, 1]]) return reduce(np.dot, [Z1, X2, Z3]) def _mat2euler(self, M): M = np.asarray(M)
except ValueError: sy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat sy = math.sqrt(r31 * r31 + r32 * r32) if sy > sy_thresh: x2 = math.acos(r33) z1 = math.atan2(r13, -r23) z3 = math.atan2(r31, r32) else: x2 = 0 z3 = 0 z1 = math.atan2(r21, r22) return (z1, x2, z3) def _init_from_angles(self, z1, x2, z3): self._z1, self._x2, self._z3 = z1, x2, z3 self._M = self._euler2mat_z1x2z3(self._z1, self._x2, self._z3) def _init_from_matrix(self, matrix): self._M = np.asarray(matrix) self._z1, self._x2, self._z3 = self._mat2euler(self._M) def __init__(self, arg1=None, x2=None, z3=None): if arg1 is None: self._init_from_angles(0, 0, 0) # loads identity matrix elif x2 is not None: self._init_from_angles(arg1, x2, z3) elif arg1.size == 3: self._init_from_angles(arg1[0], arg1[1], arg1[2]) else: self._init_from_matrix(arg1) def matrix(self, new_matrix=None): if new_matrix is not None: self._init_from_matrix(new_matrix) return self._M def euler_angles(self, z1=None, x2=None, z3=None): if z1 is not None: self._init_from_angles(z1, x2, z3) return (self._z1, self._x2, self._z3) def random(self): V = 2. * math.pi * np.random.random(), np.arccos( 2.0 * np.random.random() - 1.0), 2. 
* math.pi * np.random.random() self.euler_angles(V) class TripletHamiltonian: def __init__(self): self.Id = np.matrix('1 0 0; 0 1 0; 0 0 1', dtype=np.complex_) self.Sz = np.matrix('1 0 0; 0 0 0; 0 0 -1', dtype=np.complex_) self.Sx = np.matrix('0 1 0; 1 0 1; 0 1 0', dtype=np.complex_) / math.sqrt(2.0) self.Sy = - 1j * np.matrix('0 1 0; -1 0 1; 0 -1 0', dtype=np.complex_) / math.sqrt(2.0) self.matrix_size = 3 def fine_structure(self, D, E, rotation=Rotation()): rotation_matrix = rotation.matrix() rSx = rotation_matrix[0, 0] * self.Sx + rotation_matrix[0, 1] * self.Sy + rotation_matrix[0, 2] * self.Sz rSy = rotation_matrix[1, 0] * self.Sx + rotation_matrix[1, 1] * self.Sy + rotation_matrix[1, 2] * self.Sz rSz = rotation_matrix[2, 0] * self.Sx + rotation_matrix[2, 1] * self.Sy + rotation_matrix[2, 2] * self.Sz return D * (np.dot(rSz, rSz) - 2. * self.Id / 3.) + E * (np.dot(rSy, rSy) - np.dot(rSx, rSx)) def zeeman(self, Bx, By, Bz): return Bx * self.Sx + By * self.Sy + Bz * self.Sz def spin_hamiltonian_mol_basis(self, D, E, B, theta, phi): Bz = B * math.cos(theta) Bx = B * math.sin(theta) * math.cos(phi) By = B * math.sin(theta) * math.sin(phi) return self.fine_structure(D, E) + self.zeeman(Bx, By, Bz) def spin_hamiltonian_field_basis(self, D, E, B, theta, phi): return self.fine_structure(D, E, Rotation(0, -theta, -phi + math.pi / 2.)) + self.zeeman(0, 0, B) #false? 
check c++ def evals(self, D, E, B, theta=0, phi=0, mol_basis=True): if mol_basis: return np.linalg.eigvals(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) else: return np.linalg.eigvals(self.spin_hamiltonian_field_basis(D, E, B, theta, phi)) def evecs(self, D, E, B, theta=0, phi=0): self.eval, self.evec = np.linalg.eigh(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) class ODMR_Signal: """ * ODMR_Signal * * Output : Computes ODMR and magnetic resonance signals * * Input : spins, a reference on SpinSystem object * SpinSystem should define * spins.matrix_size * spins.evec * spins.eval * spins.singlet_projector() * spins.Bac_field_basis_matrix() """ def __init__(self, spin_system): self.spins = spin_system self.rho0 = np.empty(self.spins.matrix_size, dtype=float) self.rho2 = np.empty([self.spins.matrix_size, self.spins.matrix_size], dtype=np.complex_) self.gamma = None self.gamma_diag = None def doV(self): self.V = reduce(np.dot, [np.matrix.getH(self.spins.evec), self.spins.Sx, self.spins.evec]) def omega_nm(self, m, n): return self.spins.eval[n] - self.spins.eval[m] def load_rho0_thermal(self, Temp): sum = 0 for i in range(self.spins.matrix_size): rho0_i = math.exp(- self.spins.eval[i] / Temp) self.rho0[i] = rho0_i sum += rho0_i self.rho0 /= sum return self.rho0 def chi1(self, omega): c1 = 0j for m in range(self.spins.matrix_size): for n in range(self.spins.matrix_size): # the contribution to chi1 vanishes for n == m, whether gamma is the same for diagonal and non diagonal elements is not relvant here Vmn = self.V[m,n] Vmn_abs2 = Vmn.real*Vmn.real + Vmn.imag*Vmn.imag c1 -= (self.rho0[m] - self.rho0[n]) * Vmn_abs2 / (self.omega_nm(n, m) - omega - 1j * self.gamma); return c1 ################################################ #ExpData Plot Sam's approach dataDC2 = np.loadtxt("testupto30up.txt", comments='%') # , usecols=(0,1,3),unpack=True) fieldDC2 = np.zeros(29) freqDC2 = (dataDC2[650:1415, 0]) / 1e6 freqStartDC2 = freqDC2[0] NumPoints = 765 freqStopDC2 = 
freqDC2[764] freqStepDC2 = freqDC2[11] - freqDC2[10] IntensityDC2 = np.zeros((29, 765)) # http://python3porting.com/differences.html#range-and-xrange for i in xrange(29): fieldDC2[i] = np.mean(dataDC2[i * 5000:(i + 1) * 5000, 1]) IntensityDC2[i, :] = dataDC2[i * 5000 + 650:i * 5000 + 1415, 3] dA = 5.0 # 45 a = math.radians(90.0) * (1.0 / dA + 1.0) # 91 degree for theta and phi b = a / dA # 45 #step for angles # http://stackoverflow.com/a/2958717/1032286 c = 81.0 / 28.0 # 30 #field step d = 80.0 + c # field limit tau = 5.0 # angles and field Phi = np.arange(0, a, b) Theta = np.arange(0, a, b) Magnetic = np.arange(0, d, c) Phi_deg = np.zeros(len(Phi)) Theta_deg = np.zeros(len(Theta)) print len(Phi), len(Theta) Na = len(Phi) * len(Theta) Np = IntensityDC2.size Nb = len(fieldDC2) LambdaM = np.zeros((Np, Na)) LambdaMepr = np.zeros((Np, Na)) trp = TripletHamiltonian() trp.D = 487.9 trp.E = 72.9 odmr = ODMR_Signal(trp) # for B: 2.9 mT = 81.27236559069694 MHz # 19.9 mT = 557.7 MHz # 12 mT = 336.3 MHz index_Phi = 0 index_a = 0 """ trp.evecs() odmr.update_from_spin_hamiltonian() odmr_from_triplets.update_from_spin_hamiltonian() odmr.load_rho0_from_singlet() odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 """ for trp.phi in Phi: index_Theta = 0 Phi_deg[index_Phi] = round(math.degrees(Phi[index_Phi])) for trp.theta in Theta: index_B = 0 index_p = 0 # print(index_a) Theta_deg[index_Theta] = round(math.degrees(Theta[index_Theta])) for i in xrange(len(freqDC2)): for trp.B in Magnetic: trp.evecs(trp.D, trp.E, trp.B, trp.theta, trp.phi) odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 odmr.doV() odmr.Temp = 41600000000 odmr.load_rho0_thermal(odmr.Temp) vals = sorted(trp.evals(trp.D, trp.E, trp.B, trp.theta, trp.phi, mol_basis=True)) x1 = (vals[1].real - vals[0].real) x2 = (vals[2].real - vals[0].real) LambdaM[index_p][index_a] = ((1.0 / (math.pow(((freqDC2[i] - x1)/ tau), 2.0) + 1.0)) + (1.0 / (math.pow(((freqDC2[i] - x2) / tau), 2.0) + 1.0))) * math.sin(trp.theta) LambdaMepr[index_p][index_a] = 
odmr.chi1(2*math.pi*freqDC2[i]) print math.exp(- vals[0] / odmr.Temp), math.exp(- vals[1] /odmr.Temp),math.exp(- vals[2] /odmr.Temp) print (odmr.rho0[2]-odmr.rho0[0]), (odmr.rho0[1]-odmr.rho0[0]) print odmr.chi1(2*math.pi*freqDC2[i]), odmr.rho0 print odmr.V index_p += 1 index_B += 1 index_a += 1 index_Theta += 1 index_Phi += 1 LamInv = np.linalg.pinv(LambdaM) Experiment = IntensityDC2.flat pVec1 = np.dot(LamInv, Experiment) # read weights from a file pMatrix = np.reshape(pVec1, (len(Phi), len(Theta))) TheoryVec = np.dot(LambdaM, pVec1) TheoryMatr = np.reshape(TheoryVec, (765, 29)) pVec2, rnorm1 = nnls(LambdaM,Experiment) pMatrix2 = np.reshape(pVec2, (len(Phi), len(Theta))) TheoryVec2 = np.dot(LambdaM, pVec2) TheoryMatr2 = np.reshape(TheoryVec2, (765, 29)) pVec3, rnorm2 = nnls(LambdaMepr,Experiment) pMatrix3 = np.reshape(pVec3, (len(Phi), len(Theta))) TheoryVec3 = np.dot(LambdaMepr, pVec3) TheoryMatr3 = np.reshape(TheoryVec3, (765, 29)) gnufile = open('TheoryFromWeights5nnlsEPR.dat', 'w+') Lfile = open('Lambdas5.dat', 'w+') for k in xrange(Na): for i in xrange(765): index_p = 0 for j in xrange(29): Lfile.write(str(freqDC2[i])+ ' ' + str(fieldDC2[j]) + ' ' + str(LambdaM[index_p][k]) + ' ' + str(LambdaMepr[index_p][k])) index_p += 1 Lfile.write("\n") Lfile.close for i in xrange(765): for j in xrange(29): gnufile.write(str(freqDC2[i]) + ' ' + str(fieldDC2[j])+ ' ' + str(TheoryMatr[i][j]) + ' ' + str(TheoryMatr2[i][j]) + ' ' + str(TheoryMatr3[i][j]) + '\n') gnufile.write("\n") gnufile.close
try: sy_thresh = np.finfo(M.dtype).eps * 4
random_line_split
MatrixFromWeights_py27.py
import numpy as np from numpy.linalg import eig import cmath import math import sys from functools import reduce #import pandas as pd #import pylab as pl import array #from scipy.fftpack import fft, ifft from scipy.optimize import nnls class Rotation:
class TripletHamiltonian: def __init__(self): self.Id = np.matrix('1 0 0; 0 1 0; 0 0 1', dtype=np.complex_) self.Sz = np.matrix('1 0 0; 0 0 0; 0 0 -1', dtype=np.complex_) self.Sx = np.matrix('0 1 0; 1 0 1; 0 1 0', dtype=np.complex_) / math.sqrt(2.0) self.Sy = - 1j * np.matrix('0 1 0; -1 0 1; 0 -1 0', dtype=np.complex_) / math.sqrt(2.0) self.matrix_size = 3 def fine_structure(self, D, E, rotation=Rotation()): rotation_matrix = rotation.matrix() rSx = rotation_matrix[0, 0] * self.Sx + rotation_matrix[0, 1] * self.Sy + rotation_matrix[0, 2] * self.Sz rSy = rotation_matrix[1, 0] * self.Sx + rotation_matrix[1, 1] * self.Sy + rotation_matrix[1, 2] * self.Sz rSz = rotation_matrix[2, 0] * self.Sx + rotation_matrix[2, 1] * self.Sy + rotation_matrix[2, 2] * self.Sz return D * (np.dot(rSz, rSz) - 2. * self.Id / 3.) + E * (np.dot(rSy, rSy) - np.dot(rSx, rSx)) def zeeman(self, Bx, By, Bz): return Bx * self.Sx + By * self.Sy + Bz * self.Sz def spin_hamiltonian_mol_basis(self, D, E, B, theta, phi): Bz = B * math.cos(theta) Bx = B * math.sin(theta) * math.cos(phi) By = B * math.sin(theta) * math.sin(phi) return self.fine_structure(D, E) + self.zeeman(Bx, By, Bz) def spin_hamiltonian_field_basis(self, D, E, B, theta, phi): return self.fine_structure(D, E, Rotation(0, -theta, -phi + math.pi / 2.)) + self.zeeman(0, 0, B) #false? 
check c++ def evals(self, D, E, B, theta=0, phi=0, mol_basis=True): if mol_basis: return np.linalg.eigvals(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) else: return np.linalg.eigvals(self.spin_hamiltonian_field_basis(D, E, B, theta, phi)) def evecs(self, D, E, B, theta=0, phi=0): self.eval, self.evec = np.linalg.eigh(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) class ODMR_Signal: """ * ODMR_Signal * * Output : Computes ODMR and magnetic resonance signals * * Input : spins, a reference on SpinSystem object * SpinSystem should define * spins.matrix_size * spins.evec * spins.eval * spins.singlet_projector() * spins.Bac_field_basis_matrix() """ def __init__(self, spin_system): self.spins = spin_system self.rho0 = np.empty(self.spins.matrix_size, dtype=float) self.rho2 = np.empty([self.spins.matrix_size, self.spins.matrix_size], dtype=np.complex_) self.gamma = None self.gamma_diag = None def doV(self): self.V = reduce(np.dot, [np.matrix.getH(self.spins.evec), self.spins.Sx, self.spins.evec]) def omega_nm(self, m, n): return self.spins.eval[n] - self.spins.eval[m] def load_rho0_thermal(self, Temp): sum = 0 for i in range(self.spins.matrix_size): rho0_i = math.exp(- self.spins.eval[i] / Temp) self.rho0[i] = rho0_i sum += rho0_i self.rho0 /= sum return self.rho0 def chi1(self, omega): c1 = 0j for m in range(self.spins.matrix_size): for n in range(self.spins.matrix_size): # the contribution to chi1 vanishes for n == m, whether gamma is the same for diagonal and non diagonal elements is not relvant here Vmn = self.V[m,n] Vmn_abs2 = Vmn.real*Vmn.real + Vmn.imag*Vmn.imag c1 -= (self.rho0[m] - self.rho0[n]) * Vmn_abs2 / (self.omega_nm(n, m) - omega - 1j * self.gamma); return c1 ################################################ #ExpData Plot Sam's approach dataDC2 = np.loadtxt("testupto30up.txt", comments='%') # , usecols=(0,1,3),unpack=True) fieldDC2 = np.zeros(29) freqDC2 = (dataDC2[650:1415, 0]) / 1e6 freqStartDC2 = freqDC2[0] NumPoints = 765 freqStopDC2 = 
freqDC2[764] freqStepDC2 = freqDC2[11] - freqDC2[10] IntensityDC2 = np.zeros((29, 765)) # http://python3porting.com/differences.html#range-and-xrange for i in xrange(29): fieldDC2[i] = np.mean(dataDC2[i * 5000:(i + 1) * 5000, 1]) IntensityDC2[i, :] = dataDC2[i * 5000 + 650:i * 5000 + 1415, 3] dA = 5.0 # 45 a = math.radians(90.0) * (1.0 / dA + 1.0) # 91 degree for theta and phi b = a / dA # 45 #step for angles # http://stackoverflow.com/a/2958717/1032286 c = 81.0 / 28.0 # 30 #field step d = 80.0 + c # field limit tau = 5.0 # angles and field Phi = np.arange(0, a, b) Theta = np.arange(0, a, b) Magnetic = np.arange(0, d, c) Phi_deg = np.zeros(len(Phi)) Theta_deg = np.zeros(len(Theta)) print len(Phi), len(Theta) Na = len(Phi) * len(Theta) Np = IntensityDC2.size Nb = len(fieldDC2) LambdaM = np.zeros((Np, Na)) LambdaMepr = np.zeros((Np, Na)) trp = TripletHamiltonian() trp.D = 487.9 trp.E = 72.9 odmr = ODMR_Signal(trp) # for B: 2.9 mT = 81.27236559069694 MHz # 19.9 mT = 557.7 MHz # 12 mT = 336.3 MHz index_Phi = 0 index_a = 0 """ trp.evecs() odmr.update_from_spin_hamiltonian() odmr_from_triplets.update_from_spin_hamiltonian() odmr.load_rho0_from_singlet() odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 """ for trp.phi in Phi: index_Theta = 0 Phi_deg[index_Phi] = round(math.degrees(Phi[index_Phi])) for trp.theta in Theta: index_B = 0 index_p = 0 # print(index_a) Theta_deg[index_Theta] = round(math.degrees(Theta[index_Theta])) for i in xrange(len(freqDC2)): for trp.B in Magnetic: trp.evecs(trp.D, trp.E, trp.B, trp.theta, trp.phi) odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 odmr.doV() odmr.Temp = 41600000000 odmr.load_rho0_thermal(odmr.Temp) vals = sorted(trp.evals(trp.D, trp.E, trp.B, trp.theta, trp.phi, mol_basis=True)) x1 = (vals[1].real - vals[0].real) x2 = (vals[2].real - vals[0].real) LambdaM[index_p][index_a] = ((1.0 / (math.pow(((freqDC2[i] - x1)/ tau), 2.0) + 1.0)) + (1.0 / (math.pow(((freqDC2[i] - x2) / tau), 2.0) + 1.0))) * math.sin(trp.theta) LambdaMepr[index_p][index_a] = 
odmr.chi1(2*math.pi*freqDC2[i]) print math.exp(- vals[0] / odmr.Temp), math.exp(- vals[1] /odmr.Temp),math.exp(- vals[2] /odmr.Temp) print (odmr.rho0[2]-odmr.rho0[0]), (odmr.rho0[1]-odmr.rho0[0]) print odmr.chi1(2*math.pi*freqDC2[i]), odmr.rho0 print odmr.V index_p += 1 index_B += 1 index_a += 1 index_Theta += 1 index_Phi += 1 LamInv = np.linalg.pinv(LambdaM) Experiment = IntensityDC2.flat pVec1 = np.dot(LamInv, Experiment) # read weights from a file pMatrix = np.reshape(pVec1, (len(Phi), len(Theta))) TheoryVec = np.dot(LambdaM, pVec1) TheoryMatr = np.reshape(TheoryVec, (765, 29)) pVec2, rnorm1 = nnls(LambdaM,Experiment) pMatrix2 = np.reshape(pVec2, (len(Phi), len(Theta))) TheoryVec2 = np.dot(LambdaM, pVec2) TheoryMatr2 = np.reshape(TheoryVec2, (765, 29)) pVec3, rnorm2 = nnls(LambdaMepr,Experiment) pMatrix3 = np.reshape(pVec3, (len(Phi), len(Theta))) TheoryVec3 = np.dot(LambdaMepr, pVec3) TheoryMatr3 = np.reshape(TheoryVec3, (765, 29)) gnufile = open('TheoryFromWeights5nnlsEPR.dat', 'w+') Lfile = open('Lambdas5.dat', 'w+') for k in xrange(Na): for i in xrange(765): index_p = 0 for j in xrange(29): Lfile.write(str(freqDC2[i])+ ' ' + str(fieldDC2[j]) + ' ' + str(LambdaM[index_p][k]) + ' ' + str(LambdaMepr[index_p][k])) index_p += 1 Lfile.write("\n") Lfile.close for i in xrange(765): for j in xrange(29): gnufile.write(str(freqDC2[i]) + ' ' + str(fieldDC2[j])+ ' ' + str(TheoryMatr[i][j]) + ' ' + str(TheoryMatr2[i][j]) + ' ' + str(TheoryMatr3[i][j]) + '\n') gnufile.write("\n") gnufile.close
""" * Rotation : provides a representation for 3D space rotations * using euler angles (ZX'Z'' convention) or rotation matrices """ def _euler2mat_z1x2z3(self, z1=0, x2=0, z3=0): cosz1 = math.cos(z1) sinz1 = math.sin(z1) Z1 = np.array( [[cosz1, -sinz1, 0], [sinz1, cosz1, 0], [0, 0, 1]]) cosx = math.cos(x2) sinx = math.sin(x2) X2 = np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) cosz3 = math.cos(z3) sinz3 = math.sin(z3) Z3 = np.array( [[cosz3, -sinz3, 0], [sinz3, cosz3, 0], [0, 0, 1]]) return reduce(np.dot, [Z1, X2, Z3]) def _mat2euler(self, M): M = np.asarray(M) try: sy_thresh = np.finfo(M.dtype).eps * 4 except ValueError: sy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat sy = math.sqrt(r31 * r31 + r32 * r32) if sy > sy_thresh: x2 = math.acos(r33) z1 = math.atan2(r13, -r23) z3 = math.atan2(r31, r32) else: x2 = 0 z3 = 0 z1 = math.atan2(r21, r22) return (z1, x2, z3) def _init_from_angles(self, z1, x2, z3): self._z1, self._x2, self._z3 = z1, x2, z3 self._M = self._euler2mat_z1x2z3(self._z1, self._x2, self._z3) def _init_from_matrix(self, matrix): self._M = np.asarray(matrix) self._z1, self._x2, self._z3 = self._mat2euler(self._M) def __init__(self, arg1=None, x2=None, z3=None): if arg1 is None: self._init_from_angles(0, 0, 0) # loads identity matrix elif x2 is not None: self._init_from_angles(arg1, x2, z3) elif arg1.size == 3: self._init_from_angles(arg1[0], arg1[1], arg1[2]) else: self._init_from_matrix(arg1) def matrix(self, new_matrix=None): if new_matrix is not None: self._init_from_matrix(new_matrix) return self._M def euler_angles(self, z1=None, x2=None, z3=None): if z1 is not None: self._init_from_angles(z1, x2, z3) return (self._z1, self._x2, self._z3) def random(self): V = 2. * math.pi * np.random.random(), np.arccos( 2.0 * np.random.random() - 1.0), 2. * math.pi * np.random.random() self.euler_angles(V)
identifier_body
MatrixFromWeights_py27.py
import numpy as np from numpy.linalg import eig import cmath import math import sys from functools import reduce #import pandas as pd #import pylab as pl import array #from scipy.fftpack import fft, ifft from scipy.optimize import nnls class Rotation: """ * Rotation : provides a representation for 3D space rotations * using euler angles (ZX'Z'' convention) or rotation matrices """ def _euler2mat_z1x2z3(self, z1=0, x2=0, z3=0): cosz1 = math.cos(z1) sinz1 = math.sin(z1) Z1 = np.array( [[cosz1, -sinz1, 0], [sinz1, cosz1, 0], [0, 0, 1]]) cosx = math.cos(x2) sinx = math.sin(x2) X2 = np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) cosz3 = math.cos(z3) sinz3 = math.sin(z3) Z3 = np.array( [[cosz3, -sinz3, 0], [sinz3, cosz3, 0], [0, 0, 1]]) return reduce(np.dot, [Z1, X2, Z3]) def _mat2euler(self, M): M = np.asarray(M) try: sy_thresh = np.finfo(M.dtype).eps * 4 except ValueError: sy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat sy = math.sqrt(r31 * r31 + r32 * r32) if sy > sy_thresh: x2 = math.acos(r33) z1 = math.atan2(r13, -r23) z3 = math.atan2(r31, r32) else: x2 = 0 z3 = 0 z1 = math.atan2(r21, r22) return (z1, x2, z3) def _init_from_angles(self, z1, x2, z3): self._z1, self._x2, self._z3 = z1, x2, z3 self._M = self._euler2mat_z1x2z3(self._z1, self._x2, self._z3) def _init_from_matrix(self, matrix): self._M = np.asarray(matrix) self._z1, self._x2, self._z3 = self._mat2euler(self._M) def __init__(self, arg1=None, x2=None, z3=None): if arg1 is None: self._init_from_angles(0, 0, 0) # loads identity matrix elif x2 is not None:
elif arg1.size == 3: self._init_from_angles(arg1[0], arg1[1], arg1[2]) else: self._init_from_matrix(arg1) def matrix(self, new_matrix=None): if new_matrix is not None: self._init_from_matrix(new_matrix) return self._M def euler_angles(self, z1=None, x2=None, z3=None): if z1 is not None: self._init_from_angles(z1, x2, z3) return (self._z1, self._x2, self._z3) def random(self): V = 2. * math.pi * np.random.random(), np.arccos( 2.0 * np.random.random() - 1.0), 2. * math.pi * np.random.random() self.euler_angles(V) class TripletHamiltonian: def __init__(self): self.Id = np.matrix('1 0 0; 0 1 0; 0 0 1', dtype=np.complex_) self.Sz = np.matrix('1 0 0; 0 0 0; 0 0 -1', dtype=np.complex_) self.Sx = np.matrix('0 1 0; 1 0 1; 0 1 0', dtype=np.complex_) / math.sqrt(2.0) self.Sy = - 1j * np.matrix('0 1 0; -1 0 1; 0 -1 0', dtype=np.complex_) / math.sqrt(2.0) self.matrix_size = 3 def fine_structure(self, D, E, rotation=Rotation()): rotation_matrix = rotation.matrix() rSx = rotation_matrix[0, 0] * self.Sx + rotation_matrix[0, 1] * self.Sy + rotation_matrix[0, 2] * self.Sz rSy = rotation_matrix[1, 0] * self.Sx + rotation_matrix[1, 1] * self.Sy + rotation_matrix[1, 2] * self.Sz rSz = rotation_matrix[2, 0] * self.Sx + rotation_matrix[2, 1] * self.Sy + rotation_matrix[2, 2] * self.Sz return D * (np.dot(rSz, rSz) - 2. * self.Id / 3.) + E * (np.dot(rSy, rSy) - np.dot(rSx, rSx)) def zeeman(self, Bx, By, Bz): return Bx * self.Sx + By * self.Sy + Bz * self.Sz def spin_hamiltonian_mol_basis(self, D, E, B, theta, phi): Bz = B * math.cos(theta) Bx = B * math.sin(theta) * math.cos(phi) By = B * math.sin(theta) * math.sin(phi) return self.fine_structure(D, E) + self.zeeman(Bx, By, Bz) def spin_hamiltonian_field_basis(self, D, E, B, theta, phi): return self.fine_structure(D, E, Rotation(0, -theta, -phi + math.pi / 2.)) + self.zeeman(0, 0, B) #false? 
check c++ def evals(self, D, E, B, theta=0, phi=0, mol_basis=True): if mol_basis: return np.linalg.eigvals(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) else: return np.linalg.eigvals(self.spin_hamiltonian_field_basis(D, E, B, theta, phi)) def evecs(self, D, E, B, theta=0, phi=0): self.eval, self.evec = np.linalg.eigh(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) class ODMR_Signal: """ * ODMR_Signal * * Output : Computes ODMR and magnetic resonance signals * * Input : spins, a reference on SpinSystem object * SpinSystem should define * spins.matrix_size * spins.evec * spins.eval * spins.singlet_projector() * spins.Bac_field_basis_matrix() """ def __init__(self, spin_system): self.spins = spin_system self.rho0 = np.empty(self.spins.matrix_size, dtype=float) self.rho2 = np.empty([self.spins.matrix_size, self.spins.matrix_size], dtype=np.complex_) self.gamma = None self.gamma_diag = None def doV(self): self.V = reduce(np.dot, [np.matrix.getH(self.spins.evec), self.spins.Sx, self.spins.evec]) def omega_nm(self, m, n): return self.spins.eval[n] - self.spins.eval[m] def load_rho0_thermal(self, Temp): sum = 0 for i in range(self.spins.matrix_size): rho0_i = math.exp(- self.spins.eval[i] / Temp) self.rho0[i] = rho0_i sum += rho0_i self.rho0 /= sum return self.rho0 def chi1(self, omega): c1 = 0j for m in range(self.spins.matrix_size): for n in range(self.spins.matrix_size): # the contribution to chi1 vanishes for n == m, whether gamma is the same for diagonal and non diagonal elements is not relvant here Vmn = self.V[m,n] Vmn_abs2 = Vmn.real*Vmn.real + Vmn.imag*Vmn.imag c1 -= (self.rho0[m] - self.rho0[n]) * Vmn_abs2 / (self.omega_nm(n, m) - omega - 1j * self.gamma); return c1 ################################################ #ExpData Plot Sam's approach dataDC2 = np.loadtxt("testupto30up.txt", comments='%') # , usecols=(0,1,3),unpack=True) fieldDC2 = np.zeros(29) freqDC2 = (dataDC2[650:1415, 0]) / 1e6 freqStartDC2 = freqDC2[0] NumPoints = 765 freqStopDC2 = 
freqDC2[764] freqStepDC2 = freqDC2[11] - freqDC2[10] IntensityDC2 = np.zeros((29, 765)) # http://python3porting.com/differences.html#range-and-xrange for i in xrange(29): fieldDC2[i] = np.mean(dataDC2[i * 5000:(i + 1) * 5000, 1]) IntensityDC2[i, :] = dataDC2[i * 5000 + 650:i * 5000 + 1415, 3] dA = 5.0 # 45 a = math.radians(90.0) * (1.0 / dA + 1.0) # 91 degree for theta and phi b = a / dA # 45 #step for angles # http://stackoverflow.com/a/2958717/1032286 c = 81.0 / 28.0 # 30 #field step d = 80.0 + c # field limit tau = 5.0 # angles and field Phi = np.arange(0, a, b) Theta = np.arange(0, a, b) Magnetic = np.arange(0, d, c) Phi_deg = np.zeros(len(Phi)) Theta_deg = np.zeros(len(Theta)) print len(Phi), len(Theta) Na = len(Phi) * len(Theta) Np = IntensityDC2.size Nb = len(fieldDC2) LambdaM = np.zeros((Np, Na)) LambdaMepr = np.zeros((Np, Na)) trp = TripletHamiltonian() trp.D = 487.9 trp.E = 72.9 odmr = ODMR_Signal(trp) # for B: 2.9 mT = 81.27236559069694 MHz # 19.9 mT = 557.7 MHz # 12 mT = 336.3 MHz index_Phi = 0 index_a = 0 """ trp.evecs() odmr.update_from_spin_hamiltonian() odmr_from_triplets.update_from_spin_hamiltonian() odmr.load_rho0_from_singlet() odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 """ for trp.phi in Phi: index_Theta = 0 Phi_deg[index_Phi] = round(math.degrees(Phi[index_Phi])) for trp.theta in Theta: index_B = 0 index_p = 0 # print(index_a) Theta_deg[index_Theta] = round(math.degrees(Theta[index_Theta])) for i in xrange(len(freqDC2)): for trp.B in Magnetic: trp.evecs(trp.D, trp.E, trp.B, trp.theta, trp.phi) odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 odmr.doV() odmr.Temp = 41600000000 odmr.load_rho0_thermal(odmr.Temp) vals = sorted(trp.evals(trp.D, trp.E, trp.B, trp.theta, trp.phi, mol_basis=True)) x1 = (vals[1].real - vals[0].real) x2 = (vals[2].real - vals[0].real) LambdaM[index_p][index_a] = ((1.0 / (math.pow(((freqDC2[i] - x1)/ tau), 2.0) + 1.0)) + (1.0 / (math.pow(((freqDC2[i] - x2) / tau), 2.0) + 1.0))) * math.sin(trp.theta) LambdaMepr[index_p][index_a] = 
odmr.chi1(2*math.pi*freqDC2[i]) print math.exp(- vals[0] / odmr.Temp), math.exp(- vals[1] /odmr.Temp),math.exp(- vals[2] /odmr.Temp) print (odmr.rho0[2]-odmr.rho0[0]), (odmr.rho0[1]-odmr.rho0[0]) print odmr.chi1(2*math.pi*freqDC2[i]), odmr.rho0 print odmr.V index_p += 1 index_B += 1 index_a += 1 index_Theta += 1 index_Phi += 1 LamInv = np.linalg.pinv(LambdaM) Experiment = IntensityDC2.flat pVec1 = np.dot(LamInv, Experiment) # read weights from a file pMatrix = np.reshape(pVec1, (len(Phi), len(Theta))) TheoryVec = np.dot(LambdaM, pVec1) TheoryMatr = np.reshape(TheoryVec, (765, 29)) pVec2, rnorm1 = nnls(LambdaM,Experiment) pMatrix2 = np.reshape(pVec2, (len(Phi), len(Theta))) TheoryVec2 = np.dot(LambdaM, pVec2) TheoryMatr2 = np.reshape(TheoryVec2, (765, 29)) pVec3, rnorm2 = nnls(LambdaMepr,Experiment) pMatrix3 = np.reshape(pVec3, (len(Phi), len(Theta))) TheoryVec3 = np.dot(LambdaMepr, pVec3) TheoryMatr3 = np.reshape(TheoryVec3, (765, 29)) gnufile = open('TheoryFromWeights5nnlsEPR.dat', 'w+') Lfile = open('Lambdas5.dat', 'w+') for k in xrange(Na): for i in xrange(765): index_p = 0 for j in xrange(29): Lfile.write(str(freqDC2[i])+ ' ' + str(fieldDC2[j]) + ' ' + str(LambdaM[index_p][k]) + ' ' + str(LambdaMepr[index_p][k])) index_p += 1 Lfile.write("\n") Lfile.close for i in xrange(765): for j in xrange(29): gnufile.write(str(freqDC2[i]) + ' ' + str(fieldDC2[j])+ ' ' + str(TheoryMatr[i][j]) + ' ' + str(TheoryMatr2[i][j]) + ' ' + str(TheoryMatr3[i][j]) + '\n') gnufile.write("\n") gnufile.close
self._init_from_angles(arg1, x2, z3)
conditional_block
MatrixFromWeights_py27.py
import numpy as np from numpy.linalg import eig import cmath import math import sys from functools import reduce #import pandas as pd #import pylab as pl import array #from scipy.fftpack import fft, ifft from scipy.optimize import nnls class
: """ * Rotation : provides a representation for 3D space rotations * using euler angles (ZX'Z'' convention) or rotation matrices """ def _euler2mat_z1x2z3(self, z1=0, x2=0, z3=0): cosz1 = math.cos(z1) sinz1 = math.sin(z1) Z1 = np.array( [[cosz1, -sinz1, 0], [sinz1, cosz1, 0], [0, 0, 1]]) cosx = math.cos(x2) sinx = math.sin(x2) X2 = np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) cosz3 = math.cos(z3) sinz3 = math.sin(z3) Z3 = np.array( [[cosz3, -sinz3, 0], [sinz3, cosz3, 0], [0, 0, 1]]) return reduce(np.dot, [Z1, X2, Z3]) def _mat2euler(self, M): M = np.asarray(M) try: sy_thresh = np.finfo(M.dtype).eps * 4 except ValueError: sy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat sy = math.sqrt(r31 * r31 + r32 * r32) if sy > sy_thresh: x2 = math.acos(r33) z1 = math.atan2(r13, -r23) z3 = math.atan2(r31, r32) else: x2 = 0 z3 = 0 z1 = math.atan2(r21, r22) return (z1, x2, z3) def _init_from_angles(self, z1, x2, z3): self._z1, self._x2, self._z3 = z1, x2, z3 self._M = self._euler2mat_z1x2z3(self._z1, self._x2, self._z3) def _init_from_matrix(self, matrix): self._M = np.asarray(matrix) self._z1, self._x2, self._z3 = self._mat2euler(self._M) def __init__(self, arg1=None, x2=None, z3=None): if arg1 is None: self._init_from_angles(0, 0, 0) # loads identity matrix elif x2 is not None: self._init_from_angles(arg1, x2, z3) elif arg1.size == 3: self._init_from_angles(arg1[0], arg1[1], arg1[2]) else: self._init_from_matrix(arg1) def matrix(self, new_matrix=None): if new_matrix is not None: self._init_from_matrix(new_matrix) return self._M def euler_angles(self, z1=None, x2=None, z3=None): if z1 is not None: self._init_from_angles(z1, x2, z3) return (self._z1, self._x2, self._z3) def random(self): V = 2. * math.pi * np.random.random(), np.arccos( 2.0 * np.random.random() - 1.0), 2. 
* math.pi * np.random.random() self.euler_angles(V) class TripletHamiltonian: def __init__(self): self.Id = np.matrix('1 0 0; 0 1 0; 0 0 1', dtype=np.complex_) self.Sz = np.matrix('1 0 0; 0 0 0; 0 0 -1', dtype=np.complex_) self.Sx = np.matrix('0 1 0; 1 0 1; 0 1 0', dtype=np.complex_) / math.sqrt(2.0) self.Sy = - 1j * np.matrix('0 1 0; -1 0 1; 0 -1 0', dtype=np.complex_) / math.sqrt(2.0) self.matrix_size = 3 def fine_structure(self, D, E, rotation=Rotation()): rotation_matrix = rotation.matrix() rSx = rotation_matrix[0, 0] * self.Sx + rotation_matrix[0, 1] * self.Sy + rotation_matrix[0, 2] * self.Sz rSy = rotation_matrix[1, 0] * self.Sx + rotation_matrix[1, 1] * self.Sy + rotation_matrix[1, 2] * self.Sz rSz = rotation_matrix[2, 0] * self.Sx + rotation_matrix[2, 1] * self.Sy + rotation_matrix[2, 2] * self.Sz return D * (np.dot(rSz, rSz) - 2. * self.Id / 3.) + E * (np.dot(rSy, rSy) - np.dot(rSx, rSx)) def zeeman(self, Bx, By, Bz): return Bx * self.Sx + By * self.Sy + Bz * self.Sz def spin_hamiltonian_mol_basis(self, D, E, B, theta, phi): Bz = B * math.cos(theta) Bx = B * math.sin(theta) * math.cos(phi) By = B * math.sin(theta) * math.sin(phi) return self.fine_structure(D, E) + self.zeeman(Bx, By, Bz) def spin_hamiltonian_field_basis(self, D, E, B, theta, phi): return self.fine_structure(D, E, Rotation(0, -theta, -phi + math.pi / 2.)) + self.zeeman(0, 0, B) #false? 
check c++ def evals(self, D, E, B, theta=0, phi=0, mol_basis=True): if mol_basis: return np.linalg.eigvals(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) else: return np.linalg.eigvals(self.spin_hamiltonian_field_basis(D, E, B, theta, phi)) def evecs(self, D, E, B, theta=0, phi=0): self.eval, self.evec = np.linalg.eigh(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi)) class ODMR_Signal: """ * ODMR_Signal * * Output : Computes ODMR and magnetic resonance signals * * Input : spins, a reference on SpinSystem object * SpinSystem should define * spins.matrix_size * spins.evec * spins.eval * spins.singlet_projector() * spins.Bac_field_basis_matrix() """ def __init__(self, spin_system): self.spins = spin_system self.rho0 = np.empty(self.spins.matrix_size, dtype=float) self.rho2 = np.empty([self.spins.matrix_size, self.spins.matrix_size], dtype=np.complex_) self.gamma = None self.gamma_diag = None def doV(self): self.V = reduce(np.dot, [np.matrix.getH(self.spins.evec), self.spins.Sx, self.spins.evec]) def omega_nm(self, m, n): return self.spins.eval[n] - self.spins.eval[m] def load_rho0_thermal(self, Temp): sum = 0 for i in range(self.spins.matrix_size): rho0_i = math.exp(- self.spins.eval[i] / Temp) self.rho0[i] = rho0_i sum += rho0_i self.rho0 /= sum return self.rho0 def chi1(self, omega): c1 = 0j for m in range(self.spins.matrix_size): for n in range(self.spins.matrix_size): # the contribution to chi1 vanishes for n == m, whether gamma is the same for diagonal and non diagonal elements is not relvant here Vmn = self.V[m,n] Vmn_abs2 = Vmn.real*Vmn.real + Vmn.imag*Vmn.imag c1 -= (self.rho0[m] - self.rho0[n]) * Vmn_abs2 / (self.omega_nm(n, m) - omega - 1j * self.gamma); return c1 ################################################ #ExpData Plot Sam's approach dataDC2 = np.loadtxt("testupto30up.txt", comments='%') # , usecols=(0,1,3),unpack=True) fieldDC2 = np.zeros(29) freqDC2 = (dataDC2[650:1415, 0]) / 1e6 freqStartDC2 = freqDC2[0] NumPoints = 765 freqStopDC2 = 
freqDC2[764] freqStepDC2 = freqDC2[11] - freqDC2[10] IntensityDC2 = np.zeros((29, 765)) # http://python3porting.com/differences.html#range-and-xrange for i in xrange(29): fieldDC2[i] = np.mean(dataDC2[i * 5000:(i + 1) * 5000, 1]) IntensityDC2[i, :] = dataDC2[i * 5000 + 650:i * 5000 + 1415, 3] dA = 5.0 # 45 a = math.radians(90.0) * (1.0 / dA + 1.0) # 91 degree for theta and phi b = a / dA # 45 #step for angles # http://stackoverflow.com/a/2958717/1032286 c = 81.0 / 28.0 # 30 #field step d = 80.0 + c # field limit tau = 5.0 # angles and field Phi = np.arange(0, a, b) Theta = np.arange(0, a, b) Magnetic = np.arange(0, d, c) Phi_deg = np.zeros(len(Phi)) Theta_deg = np.zeros(len(Theta)) print len(Phi), len(Theta) Na = len(Phi) * len(Theta) Np = IntensityDC2.size Nb = len(fieldDC2) LambdaM = np.zeros((Np, Na)) LambdaMepr = np.zeros((Np, Na)) trp = TripletHamiltonian() trp.D = 487.9 trp.E = 72.9 odmr = ODMR_Signal(trp) # for B: 2.9 mT = 81.27236559069694 MHz # 19.9 mT = 557.7 MHz # 12 mT = 336.3 MHz index_Phi = 0 index_a = 0 """ trp.evecs() odmr.update_from_spin_hamiltonian() odmr_from_triplets.update_from_spin_hamiltonian() odmr.load_rho0_from_singlet() odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 """ for trp.phi in Phi: index_Theta = 0 Phi_deg[index_Phi] = round(math.degrees(Phi[index_Phi])) for trp.theta in Theta: index_B = 0 index_p = 0 # print(index_a) Theta_deg[index_Theta] = round(math.degrees(Theta[index_Theta])) for i in xrange(len(freqDC2)): for trp.B in Magnetic: trp.evecs(trp.D, trp.E, trp.B, trp.theta, trp.phi) odmr.gamma = 1e-2 odmr.gamma_diag = 1e-2 odmr.doV() odmr.Temp = 41600000000 odmr.load_rho0_thermal(odmr.Temp) vals = sorted(trp.evals(trp.D, trp.E, trp.B, trp.theta, trp.phi, mol_basis=True)) x1 = (vals[1].real - vals[0].real) x2 = (vals[2].real - vals[0].real) LambdaM[index_p][index_a] = ((1.0 / (math.pow(((freqDC2[i] - x1)/ tau), 2.0) + 1.0)) + (1.0 / (math.pow(((freqDC2[i] - x2) / tau), 2.0) + 1.0))) * math.sin(trp.theta) LambdaMepr[index_p][index_a] = 
odmr.chi1(2*math.pi*freqDC2[i]) print math.exp(- vals[0] / odmr.Temp), math.exp(- vals[1] /odmr.Temp),math.exp(- vals[2] /odmr.Temp) print (odmr.rho0[2]-odmr.rho0[0]), (odmr.rho0[1]-odmr.rho0[0]) print odmr.chi1(2*math.pi*freqDC2[i]), odmr.rho0 print odmr.V index_p += 1 index_B += 1 index_a += 1 index_Theta += 1 index_Phi += 1 LamInv = np.linalg.pinv(LambdaM) Experiment = IntensityDC2.flat pVec1 = np.dot(LamInv, Experiment) # read weights from a file pMatrix = np.reshape(pVec1, (len(Phi), len(Theta))) TheoryVec = np.dot(LambdaM, pVec1) TheoryMatr = np.reshape(TheoryVec, (765, 29)) pVec2, rnorm1 = nnls(LambdaM,Experiment) pMatrix2 = np.reshape(pVec2, (len(Phi), len(Theta))) TheoryVec2 = np.dot(LambdaM, pVec2) TheoryMatr2 = np.reshape(TheoryVec2, (765, 29)) pVec3, rnorm2 = nnls(LambdaMepr,Experiment) pMatrix3 = np.reshape(pVec3, (len(Phi), len(Theta))) TheoryVec3 = np.dot(LambdaMepr, pVec3) TheoryMatr3 = np.reshape(TheoryVec3, (765, 29)) gnufile = open('TheoryFromWeights5nnlsEPR.dat', 'w+') Lfile = open('Lambdas5.dat', 'w+') for k in xrange(Na): for i in xrange(765): index_p = 0 for j in xrange(29): Lfile.write(str(freqDC2[i])+ ' ' + str(fieldDC2[j]) + ' ' + str(LambdaM[index_p][k]) + ' ' + str(LambdaMepr[index_p][k])) index_p += 1 Lfile.write("\n") Lfile.close for i in xrange(765): for j in xrange(29): gnufile.write(str(freqDC2[i]) + ' ' + str(fieldDC2[j])+ ' ' + str(TheoryMatr[i][j]) + ' ' + str(TheoryMatr2[i][j]) + ' ' + str(TheoryMatr3[i][j]) + '\n') gnufile.write("\n") gnufile.close
Rotation
identifier_name
__init__.py
# Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Revision $Id: rosparam 1641 2008-07-28 21:39:33Z sfkwc $ """ Implementation of the rosparam as well as a library for modifying the state of the ROS Parameter Server using YAML files. """ from __future__ import print_function NAME = 'rosparam' ## namespace key. Use of this in a YAML document specifies the ## namespace of all the params. NOTE: phasing out most use of this ## key. 
It's still useful in corner cases, but most of its ## functionality can be achieved with command-line arguments. NS = '_ns' import base64 import math import os import re import sys import socket try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary from optparse import OptionParser import yaml import rosgraph from rosgraph.names import script_resolve_name, ns_join, get_ros_namespace, make_caller_id, make_global_ns, GLOBALNS class RosParamException(Exception): """ rosparam base exception type """ pass class RosParamIOException(RosParamException): """ Exception for communication-based (i/o) errors. """ pass # pyyaml customizations for binary and angle data def represent_xml_binary(loader, data): """ Adds a pyyaml serializer to handle xmlrpclib.Binary objects """ data = base64.b64encode(data.data) return loader.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|') def represent_foo(loader, data): return loader.represent_scalar(u'#', data) def construct_yaml_binary(loader, node): """ Overrides pyaml's constructor for binary data. Wraps binary data in xmlrpclib.Binary container instead of straight string representation. 
""" return Binary(loader.construct_yaml_binary(node)) # register the (de)serializers with pyyaml yaml.add_representer(Binary,represent_xml_binary) yaml.add_constructor(u'tag:yaml.org,2002:binary', construct_yaml_binary) def construct_angle_radians(loader, node): """ python-yaml utility for converting rad(num) into float value """ value = loader.construct_scalar(node).strip() exprvalue = value.replace('pi', 'math.pi') if exprvalue.startswith("rad("): exprvalue = exprvalue[4:-1] try: return float(eval(exprvalue)) except SyntaxError as e: raise RosParamException("invalid radian expression: %s"%value) def construct_angle_degrees(loader, node): """ python-yaml utility for converting deg(num) into float value """ value = loader.construct_scalar(node) exprvalue = value if exprvalue.startswith("deg("): exprvalue = exprvalue.strip()[4:-1] try: return float(exprvalue) * math.pi / 180.0 except ValueError: raise RosParamException("invalid degree value: %s"%value) # utilities def _get_caller_id(): """ :returns: caller ID for rosparam ROS client calls, ``str`` """ return make_caller_id('rosparam-%s'%os.getpid()) def print_params(params, ns): """ Print contents of param dictionary to screen """ if type(params) == dict: for k, v in params.items(): if type(v) == dict: print_params(v, ns_join(ns, k)) else: print("%s=%s"%(ns_join(ns, k), v)) else: print(params) # yaml processing def load_file(filename, default_namespace=None, verbose=False): """ Load the YAML document from the specified file :param filename: name of filename, ``str`` :param default_namespace: namespace to load filename into, ``str`` :returns [(dict, str)...]: list of parameter dictionary and corresponding namespaces for each YAML document in the file :raises: :exc:`RosParamException`: if unable to load contents of filename """ if not filename or filename == '-': f = sys.stdin if verbose: print("reading parameters from stdin") return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) 
else: if not os.path.isfile(filename): raise RosParamException("file [%s] does not exist"%filename) if verbose: print("reading parameters from [%s]"%filename) with open(filename, 'r') as f: return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) def load_str(str, filename, default_namespace=None, verbose=False): """ Load the YAML document as a string :param filename: name of filename, only used for debugging, ``str`` :param default_namespace: namespace to load filename into, ``str`` :param str: YAML text, ``str`` :returns: list of parameter dictionary and corresponding namespaces for each YAML document in the file, ``[(dict, str)...]`` """ paramlist = [] default_namespace = default_namespace or get_ros_namespace() for doc in yaml.load_all(str): if NS in doc: ns = ns_join(default_namespace, doc.get(NS, None)) if verbose: print("reading parameters into namespace [%s]"%ns) del doc[NS] else: ns = default_namespace paramlist.append((doc, ns)) return paramlist # DUMP/GET def get_param_server(): return rosgraph.Master(_get_caller_id()) def get_param(param): """ Download a parameter from Parameter Server :param param: parameter name to retrieve from parameter server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ try: return get_param_server().getParam(param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # #698 def _pretty_print(value, indent=''): """ Pretty print get value :param value: value to print :param indent: indent level, used for recursive calls, ``str`` """ keys = list(value.keys()) keys.sort() for k in keys: v = value[k] if type(v) == dict: print("%s%s:"%(indent, k)) _pretty_print(v, indent+' ') elif type(v) == str: if '\n' in v: print(indent+'%s: |'%k) for l in v.split('\n'): print(indent+' '+l) else: print("%s%s: %s"%(indent, k, v)) else: dump = yaml.dump(v) # #1617 # newer versions of python-yaml append the '...' document end # syntax. 
as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-4] sys.stdout.write("%s%s: %s"%(indent, k, dump)) def _rosparam_cmd_get_param(param, pretty=False, verbose=False): """ Download a parameter tree and print to screen :param param: parameter name to retrieve from Parameter Server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ # yaml.dump has a \n at the end, so use stdout.write instead of print if verbose: print("getting parameter [%s]"%param) try: val = get_param(param) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) if pretty and type(val) in [dict, str]: if type(val) == dict: _pretty_print(val) else: print(val) else: dump = yaml.dump(val) # #1617 # newer versions of python-yaml append the '...' document end # syntax. as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-5] # #3761 add newline in output sys.stdout.write("%s\n"%(dump)) def dump_params(filename, param, verbose=False): """ Download a parameter tree from the Parameter Server and store in a yaml file :param filename: name of file to save YAML representation, ``str`` :param param: name of parameter/namespace to dump, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ tree = get_param(param) if verbose: print_params(tree, param) if not filename: f = sys.stdout yaml.dump(tree, f) else: f = open(filename, 'w') try: yaml.dump(tree, f) finally: f.close() def delete_param(param, verbose=False): """ Delete a parameter from the Parameter Server :param param: parameter name, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ try: if param == GLOBALNS: # not allowed to delete the root of the tree as it must always # have a value. 
the equivalent command is setting the root to an # empty dictionary get_param_server().setParam(GLOBALNS, {}) if verbose: print("deleted ENTIRE parameter server") else: get_param_server().deleteParam(param) if verbose: print("deleted parameter [%s]"%param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # LOAD/SET def set_param_raw(param, value, verbose=False): """ Set param on the Parameter Server. Unlike L{set_param()}, this takes in a Python value to set instead of YAML. :param param: parameter name, ``str`` :param value XmlRpcLegalValue: value to upload, ``XmlRpcLegalValue`` """ if type(value) == dict: # #1098 changing dictionary behavior to be an update, rather # than replace behavior. for k, v in value.items(): # dictionary keys must be non-unicode strings if isinstance(k, str): set_param_raw(ns_join(param, k), v, verbose=verbose) else: raise RosParamException("YAML dictionaries must have string keys. Invalid dictionary is:\n%s"%value) else: try: expected_type = long except NameError : expected_type = int if type(value) == expected_type: if value > sys.maxsize: raise RosParamException("Overflow: Parameter Server integers must be 32-bit signed integers:\n\t-%s <= value <= %s"%(maxint - 1, maxint)) try: get_param_server().setParam(param, value) except socket.error: raise RosParamIOException("Unable to communicate with master!") if verbose: print("set parameter [%s] to [%s]"%(param, value)) def set_param(param, value, verbose=False): """ Set param on the ROS parameter server using a YAML value. 
:param param: parameter name, ``str`` :param value: yaml-encoded value, ``str`` """ set_param_raw(param, yaml.load(value), verbose=verbose) def upload_params(ns, values, verbose=False): """ Upload params to the Parameter Server :param values: key/value dictionary, where keys are parameter names and values are parameter values, ``dict`` :param ns: namespace to load parameters into, ``str`` """ if ns == '/' and not type(values) == dict: raise RosParamException("global / can only be set to a dictionary") if verbose: print_params(values, ns) set_param_raw(ns, values) # LIST def list_params(ns): """ Get list of parameters in ns :param ns: namespace to match, ``str`` """ try: ns = make_global_ns(ns) names = get_param_server().getParamNames() names.sort() return [n for n in names if n.startswith(ns)] except socket.error: raise RosParamIOException("Unable to communicate with master!") # COMMAND-LINE PARSING def _rosparam_cmd_get_dump(cmd, argv): """ Process command line for rosparam get/dump, e.g.:: rosparam get param rosparam dump file.yaml [namespace] :param cmd: command ('get' or 'dump'), ``str`` :param argv: command-line args, ``str`` """ # get and dump are equivalent functionality, just different arguments if cmd == 'dump': parser = OptionParser(usage="usage: %prog dump [options] file [namespace]", prog=NAME) elif cmd == 'get': parser = OptionParser(usage="usage: %prog get [options] parameter", prog=NAME) parser.add_option("-p", dest="pretty", default=False, action="store_true", help="pretty print. WARNING: not YAML-safe") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg = None ns = '' if len(args) == 0: if cmd == 'get': parser.error("invalid arguments. 
Please specify a parameter name") elif len(args) == 1: arg = args[0] elif len(args) == 2 and cmd == 'dump': arg = args[0] ns = args[1] else: parser.error("too many arguments") if cmd == 'get': _rosparam_cmd_get_param(script_resolve_name(NAME, arg), pretty=options.pretty, verbose=options.verbose) else: if options.verbose: print("dumping namespace [%s] to file [%s]"%(ns, arg)) dump_params(arg, script_resolve_name(NAME, ns), verbose=options.verbose) def _set_optparse_neg_args(parser, argv): # we don't use optparse to parse actual arguments, just options, # due to the fact that optparse doesn't handle negative numbers as # arguments. This parsing is complicated by the fact that we still # need to respect argument-bearing options like --textfile. args = [] optparse_args = [] skip = False for s in argv[2:]: if s.startswith('-'): if s in ['-t', '--textfile', '-b', '--binfile']: skip = True optparse_args.append(s) elif skip: parser.error("-t and --textfile options require an argument") elif len(s) > 1 and ord(s[1]) >= ord('0') and ord(s[1]) <= ord('9'): args.append(s) else: optparse_args.append(s) else: if skip: skip = False optparse_args.append(s) else: args.append(s) options, _ = parser.parse_args(optparse_args) return options, args # TODO: break this into separate routines, has gotten too ugly to multiplex def _rosparam_cmd_set_load(cmd, argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ if cmd == 'load': parser = OptionParser(usage="usage: %prog load [options] file [namespace]", prog=NAME) elif cmd == 'set': parser = OptionParser(usage="usage: %prog set [options] parameter value", prog=NAME) parser.add_option("-t", "--textfile", dest="text_file", default=None, metavar="TEXT_FILE", help="set parameters to contents of text file") parser.add_option("-b", "--binfile", dest="bin_file", default=None, metavar="BINARY_FILE", 
help="set parameters to contents of binary file") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") if cmd == 'set': options, args = _set_optparse_neg_args(parser, argv) if options.text_file and options.bin_file: parser.error("you may only specify one of --textfile or --binfile") else: options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: if cmd == 'load': parser.error("invalid arguments. Please specify a file name or - for stdin") elif cmd == 'set': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] if cmd == 'set' and not (options.text_file or options.bin_file): parser.error("invalid arguments. Please specify a parameter value") elif len(args) == 2: arg = args[0] arg2 = args[1] else: parser.error("too many arguments") if cmd == 'set': name = script_resolve_name(NAME, arg) # #2647 if options.text_file: if not os.path.isfile(options.text_file): parser.error("file '%s' does not exist"%(options.text_file)) with open(options.text_file) as f: arg2 = f.read() set_param_raw(name, arg2, verbose=options.verbose) elif options.bin_file: with open(options.bin_file, 'rb') as f: arg2 = Binary(f.read()) set_param_raw(name, arg2, verbose=options.verbose) else: # #2237: the empty string is really hard to specify on the # command-line due to bash quoting rules. We cheat here and # let an empty Python string be an empty YAML string (instead # of YAML null, which has no meaning to the Parameter Server # anyway). 
if arg2 == '': arg2 = '!!str' set_param(name, arg2, verbose=options.verbose) else: paramlist = load_file(arg, default_namespace=script_resolve_name(NAME, arg2), verbose=options.verbose) for params,ns in paramlist: upload_params(ns, params, verbose=options.verbose) def _rosparam_cmd_list(argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog list [namespace]", prog=NAME) options, args = parser.parse_args(argv[2:]) ns = GLOBALNS if len(args) == 1: ns = script_resolve_name(NAME, args[0]) elif len(args) == 2: parser.error("too many arguments") print('\n'.join(list_params(ns))) def _rosparam_cmd_delete(argv):
def _fullusage(): """ Prints rosparam usage """ print("""rosparam is a command-line tool for getting, setting, and deleting parameters from the ROS Parameter Server. Commands: \trosparam set\tset parameter \trosparam get\tget parameter \trosparam load\tload parameters from file \trosparam dump\tdump parameters to file \trosparam delete\tdelete parameter \trosparam list\tlist parameter names """) sys.exit(0) def yamlmain(argv=None): """ Command-line main routine. Loads in one or more input files :param argv: command-line arguments or None to use sys.argv, ``[str]`` """ if argv is None: argv = sys.argv if len(argv) == 1: _fullusage() try: command = argv[1] if command in ['get', 'dump']: _rosparam_cmd_get_dump(command, argv) elif command in ['set', 'load']: _rosparam_cmd_set_load(command, argv) elif command in ['delete']: _rosparam_cmd_delete(argv) elif command == 'list': _rosparam_cmd_list(argv) else: _fullusage() except RosParamException as e: print("ERROR: "+str(e), file=sys.stderr) sys.exit(1) # YAML configuration. Doxygen does not like these being higher up in the code yaml.add_constructor(u'!radians', construct_angle_radians) yaml.add_constructor(u'!degrees', construct_angle_degrees) # allow both !degrees 180, !radians 2*pi pattern = re.compile(r'^deg\([^\)]*\)$') yaml.add_implicit_resolver(u'!degrees', pattern, first="deg(") pattern = re.compile(r'^rad\([^\)]*\)$') yaml.add_implicit_resolver(u'!radians', pattern, first="rad(")
""" Process command line for rosparam delete, e.g.:: rosparam delete param :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog delete [options] parameter", prog=NAME) parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] else: parser.error("too many arguments") try: delete_param(script_resolve_name(NAME, arg), verbose=options.verbose) except rosgraph.masterapi.Error as e: raise RosParamException(str(e))
identifier_body
__init__.py
# Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Revision $Id: rosparam 1641 2008-07-28 21:39:33Z sfkwc $ """ Implementation of the rosparam as well as a library for modifying the state of the ROS Parameter Server using YAML files. """ from __future__ import print_function NAME = 'rosparam' ## namespace key. Use of this in a YAML document specifies the ## namespace of all the params. NOTE: phasing out most use of this ## key. 
It's still useful in corner cases, but most of its ## functionality can be achieved with command-line arguments. NS = '_ns' import base64 import math import os import re import sys import socket try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary from optparse import OptionParser import yaml import rosgraph from rosgraph.names import script_resolve_name, ns_join, get_ros_namespace, make_caller_id, make_global_ns, GLOBALNS class RosParamException(Exception): """ rosparam base exception type """ pass class RosParamIOException(RosParamException): """ Exception for communication-based (i/o) errors. """ pass # pyyaml customizations for binary and angle data def represent_xml_binary(loader, data): """ Adds a pyyaml serializer to handle xmlrpclib.Binary objects """ data = base64.b64encode(data.data) return loader.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|') def represent_foo(loader, data): return loader.represent_scalar(u'#', data) def construct_yaml_binary(loader, node): """ Overrides pyaml's constructor for binary data. Wraps binary data in xmlrpclib.Binary container instead of straight string representation. 
""" return Binary(loader.construct_yaml_binary(node)) # register the (de)serializers with pyyaml yaml.add_representer(Binary,represent_xml_binary) yaml.add_constructor(u'tag:yaml.org,2002:binary', construct_yaml_binary) def construct_angle_radians(loader, node): """ python-yaml utility for converting rad(num) into float value """ value = loader.construct_scalar(node).strip() exprvalue = value.replace('pi', 'math.pi') if exprvalue.startswith("rad("): exprvalue = exprvalue[4:-1] try: return float(eval(exprvalue)) except SyntaxError as e: raise RosParamException("invalid radian expression: %s"%value) def construct_angle_degrees(loader, node): """ python-yaml utility for converting deg(num) into float value """ value = loader.construct_scalar(node) exprvalue = value if exprvalue.startswith("deg("): exprvalue = exprvalue.strip()[4:-1] try: return float(exprvalue) * math.pi / 180.0 except ValueError: raise RosParamException("invalid degree value: %s"%value) # utilities def _get_caller_id(): """ :returns: caller ID for rosparam ROS client calls, ``str`` """ return make_caller_id('rosparam-%s'%os.getpid()) def print_params(params, ns): """ Print contents of param dictionary to screen """ if type(params) == dict: for k, v in params.items(): if type(v) == dict: print_params(v, ns_join(ns, k)) else: print("%s=%s"%(ns_join(ns, k), v)) else: print(params) # yaml processing def load_file(filename, default_namespace=None, verbose=False): """ Load the YAML document from the specified file :param filename: name of filename, ``str`` :param default_namespace: namespace to load filename into, ``str`` :returns [(dict, str)...]: list of parameter dictionary and corresponding namespaces for each YAML document in the file :raises: :exc:`RosParamException`: if unable to load contents of filename """ if not filename or filename == '-': f = sys.stdin if verbose: print("reading parameters from stdin") return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) 
else: if not os.path.isfile(filename): raise RosParamException("file [%s] does not exist"%filename) if verbose: print("reading parameters from [%s]"%filename) with open(filename, 'r') as f: return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) def load_str(str, filename, default_namespace=None, verbose=False): """ Load the YAML document as a string :param filename: name of filename, only used for debugging, ``str`` :param default_namespace: namespace to load filename into, ``str`` :param str: YAML text, ``str`` :returns: list of parameter dictionary and corresponding namespaces for each YAML document in the file, ``[(dict, str)...]`` """ paramlist = [] default_namespace = default_namespace or get_ros_namespace() for doc in yaml.load_all(str): if NS in doc: ns = ns_join(default_namespace, doc.get(NS, None)) if verbose: print("reading parameters into namespace [%s]"%ns) del doc[NS] else: ns = default_namespace paramlist.append((doc, ns)) return paramlist # DUMP/GET def get_param_server(): return rosgraph.Master(_get_caller_id()) def get_param(param): """ Download a parameter from Parameter Server :param param: parameter name to retrieve from parameter server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ try: return get_param_server().getParam(param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # #698 def _pretty_print(value, indent=''): """ Pretty print get value :param value: value to print :param indent: indent level, used for recursive calls, ``str`` """ keys = list(value.keys()) keys.sort() for k in keys: v = value[k] if type(v) == dict: print("%s%s:"%(indent, k)) _pretty_print(v, indent+' ') elif type(v) == str: if '\n' in v: print(indent+'%s: |'%k) for l in v.split('\n'): print(indent+' '+l) else:
else: dump = yaml.dump(v) # #1617 # newer versions of python-yaml append the '...' document end # syntax. as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-4] sys.stdout.write("%s%s: %s"%(indent, k, dump)) def _rosparam_cmd_get_param(param, pretty=False, verbose=False): """ Download a parameter tree and print to screen :param param: parameter name to retrieve from Parameter Server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ # yaml.dump has a \n at the end, so use stdout.write instead of print if verbose: print("getting parameter [%s]"%param) try: val = get_param(param) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) if pretty and type(val) in [dict, str]: if type(val) == dict: _pretty_print(val) else: print(val) else: dump = yaml.dump(val) # #1617 # newer versions of python-yaml append the '...' document end # syntax. 
as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-5] # #3761 add newline in output sys.stdout.write("%s\n"%(dump)) def dump_params(filename, param, verbose=False): """ Download a parameter tree from the Parameter Server and store in a yaml file :param filename: name of file to save YAML representation, ``str`` :param param: name of parameter/namespace to dump, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ tree = get_param(param) if verbose: print_params(tree, param) if not filename: f = sys.stdout yaml.dump(tree, f) else: f = open(filename, 'w') try: yaml.dump(tree, f) finally: f.close() def delete_param(param, verbose=False): """ Delete a parameter from the Parameter Server :param param: parameter name, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ try: if param == GLOBALNS: # not allowed to delete the root of the tree as it must always # have a value. the equivalent command is setting the root to an # empty dictionary get_param_server().setParam(GLOBALNS, {}) if verbose: print("deleted ENTIRE parameter server") else: get_param_server().deleteParam(param) if verbose: print("deleted parameter [%s]"%param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # LOAD/SET def set_param_raw(param, value, verbose=False): """ Set param on the Parameter Server. Unlike L{set_param()}, this takes in a Python value to set instead of YAML. :param param: parameter name, ``str`` :param value XmlRpcLegalValue: value to upload, ``XmlRpcLegalValue`` """ if type(value) == dict: # #1098 changing dictionary behavior to be an update, rather # than replace behavior. for k, v in value.items(): # dictionary keys must be non-unicode strings if isinstance(k, str): set_param_raw(ns_join(param, k), v, verbose=verbose) else: raise RosParamException("YAML dictionaries must have string keys. 
Invalid dictionary is:\n%s"%value) else: try: expected_type = long except NameError : expected_type = int if type(value) == expected_type: if value > sys.maxsize: raise RosParamException("Overflow: Parameter Server integers must be 32-bit signed integers:\n\t-%s <= value <= %s"%(maxint - 1, maxint)) try: get_param_server().setParam(param, value) except socket.error: raise RosParamIOException("Unable to communicate with master!") if verbose: print("set parameter [%s] to [%s]"%(param, value)) def set_param(param, value, verbose=False): """ Set param on the ROS parameter server using a YAML value. :param param: parameter name, ``str`` :param value: yaml-encoded value, ``str`` """ set_param_raw(param, yaml.load(value), verbose=verbose) def upload_params(ns, values, verbose=False): """ Upload params to the Parameter Server :param values: key/value dictionary, where keys are parameter names and values are parameter values, ``dict`` :param ns: namespace to load parameters into, ``str`` """ if ns == '/' and not type(values) == dict: raise RosParamException("global / can only be set to a dictionary") if verbose: print_params(values, ns) set_param_raw(ns, values) # LIST def list_params(ns): """ Get list of parameters in ns :param ns: namespace to match, ``str`` """ try: ns = make_global_ns(ns) names = get_param_server().getParamNames() names.sort() return [n for n in names if n.startswith(ns)] except socket.error: raise RosParamIOException("Unable to communicate with master!") # COMMAND-LINE PARSING def _rosparam_cmd_get_dump(cmd, argv): """ Process command line for rosparam get/dump, e.g.:: rosparam get param rosparam dump file.yaml [namespace] :param cmd: command ('get' or 'dump'), ``str`` :param argv: command-line args, ``str`` """ # get and dump are equivalent functionality, just different arguments if cmd == 'dump': parser = OptionParser(usage="usage: %prog dump [options] file [namespace]", prog=NAME) elif cmd == 'get': parser = OptionParser(usage="usage: %prog get 
[options] parameter", prog=NAME) parser.add_option("-p", dest="pretty", default=False, action="store_true", help="pretty print. WARNING: not YAML-safe") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg = None ns = '' if len(args) == 0: if cmd == 'get': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] elif len(args) == 2 and cmd == 'dump': arg = args[0] ns = args[1] else: parser.error("too many arguments") if cmd == 'get': _rosparam_cmd_get_param(script_resolve_name(NAME, arg), pretty=options.pretty, verbose=options.verbose) else: if options.verbose: print("dumping namespace [%s] to file [%s]"%(ns, arg)) dump_params(arg, script_resolve_name(NAME, ns), verbose=options.verbose) def _set_optparse_neg_args(parser, argv): # we don't use optparse to parse actual arguments, just options, # due to the fact that optparse doesn't handle negative numbers as # arguments. This parsing is complicated by the fact that we still # need to respect argument-bearing options like --textfile. 
args = [] optparse_args = [] skip = False for s in argv[2:]: if s.startswith('-'): if s in ['-t', '--textfile', '-b', '--binfile']: skip = True optparse_args.append(s) elif skip: parser.error("-t and --textfile options require an argument") elif len(s) > 1 and ord(s[1]) >= ord('0') and ord(s[1]) <= ord('9'): args.append(s) else: optparse_args.append(s) else: if skip: skip = False optparse_args.append(s) else: args.append(s) options, _ = parser.parse_args(optparse_args) return options, args # TODO: break this into separate routines, has gotten too ugly to multiplex def _rosparam_cmd_set_load(cmd, argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ if cmd == 'load': parser = OptionParser(usage="usage: %prog load [options] file [namespace]", prog=NAME) elif cmd == 'set': parser = OptionParser(usage="usage: %prog set [options] parameter value", prog=NAME) parser.add_option("-t", "--textfile", dest="text_file", default=None, metavar="TEXT_FILE", help="set parameters to contents of text file") parser.add_option("-b", "--binfile", dest="bin_file", default=None, metavar="BINARY_FILE", help="set parameters to contents of binary file") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") if cmd == 'set': options, args = _set_optparse_neg_args(parser, argv) if options.text_file and options.bin_file: parser.error("you may only specify one of --textfile or --binfile") else: options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: if cmd == 'load': parser.error("invalid arguments. Please specify a file name or - for stdin") elif cmd == 'set': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] if cmd == 'set' and not (options.text_file or options.bin_file): parser.error("invalid arguments. 
Please specify a parameter value") elif len(args) == 2: arg = args[0] arg2 = args[1] else: parser.error("too many arguments") if cmd == 'set': name = script_resolve_name(NAME, arg) # #2647 if options.text_file: if not os.path.isfile(options.text_file): parser.error("file '%s' does not exist"%(options.text_file)) with open(options.text_file) as f: arg2 = f.read() set_param_raw(name, arg2, verbose=options.verbose) elif options.bin_file: with open(options.bin_file, 'rb') as f: arg2 = Binary(f.read()) set_param_raw(name, arg2, verbose=options.verbose) else: # #2237: the empty string is really hard to specify on the # command-line due to bash quoting rules. We cheat here and # let an empty Python string be an empty YAML string (instead # of YAML null, which has no meaning to the Parameter Server # anyway). if arg2 == '': arg2 = '!!str' set_param(name, arg2, verbose=options.verbose) else: paramlist = load_file(arg, default_namespace=script_resolve_name(NAME, arg2), verbose=options.verbose) for params,ns in paramlist: upload_params(ns, params, verbose=options.verbose) def _rosparam_cmd_list(argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog list [namespace]", prog=NAME) options, args = parser.parse_args(argv[2:]) ns = GLOBALNS if len(args) == 1: ns = script_resolve_name(NAME, args[0]) elif len(args) == 2: parser.error("too many arguments") print('\n'.join(list_params(ns))) def _rosparam_cmd_delete(argv): """ Process command line for rosparam delete, e.g.:: rosparam delete param :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog delete [options] parameter", prog=NAME) parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: 
parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] else: parser.error("too many arguments") try: delete_param(script_resolve_name(NAME, arg), verbose=options.verbose) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) def _fullusage(): """ Prints rosparam usage """ print("""rosparam is a command-line tool for getting, setting, and deleting parameters from the ROS Parameter Server. Commands: \trosparam set\tset parameter \trosparam get\tget parameter \trosparam load\tload parameters from file \trosparam dump\tdump parameters to file \trosparam delete\tdelete parameter \trosparam list\tlist parameter names """) sys.exit(0) def yamlmain(argv=None): """ Command-line main routine. Loads in one or more input files :param argv: command-line arguments or None to use sys.argv, ``[str]`` """ if argv is None: argv = sys.argv if len(argv) == 1: _fullusage() try: command = argv[1] if command in ['get', 'dump']: _rosparam_cmd_get_dump(command, argv) elif command in ['set', 'load']: _rosparam_cmd_set_load(command, argv) elif command in ['delete']: _rosparam_cmd_delete(argv) elif command == 'list': _rosparam_cmd_list(argv) else: _fullusage() except RosParamException as e: print("ERROR: "+str(e), file=sys.stderr) sys.exit(1) # YAML configuration. Doxygen does not like these being higher up in the code yaml.add_constructor(u'!radians', construct_angle_radians) yaml.add_constructor(u'!degrees', construct_angle_degrees) # allow both !degrees 180, !radians 2*pi pattern = re.compile(r'^deg\([^\)]*\)$') yaml.add_implicit_resolver(u'!degrees', pattern, first="deg(") pattern = re.compile(r'^rad\([^\)]*\)$') yaml.add_implicit_resolver(u'!radians', pattern, first="rad(")
print("%s%s: %s"%(indent, k, v))
conditional_block
__init__.py
# Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Revision $Id: rosparam 1641 2008-07-28 21:39:33Z sfkwc $ """ Implementation of the rosparam as well as a library for modifying the state of the ROS Parameter Server using YAML files. """ from __future__ import print_function NAME = 'rosparam' ## namespace key. Use of this in a YAML document specifies the ## namespace of all the params. NOTE: phasing out most use of this ## key. 
It's still useful in corner cases, but most of its ## functionality can be achieved with command-line arguments. NS = '_ns' import base64 import math import os import re import sys import socket try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary from optparse import OptionParser import yaml import rosgraph from rosgraph.names import script_resolve_name, ns_join, get_ros_namespace, make_caller_id, make_global_ns, GLOBALNS class RosParamException(Exception): """ rosparam base exception type """ pass class RosParamIOException(RosParamException): """ Exception for communication-based (i/o) errors. """ pass # pyyaml customizations for binary and angle data def represent_xml_binary(loader, data): """ Adds a pyyaml serializer to handle xmlrpclib.Binary objects """ data = base64.b64encode(data.data) return loader.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|') def represent_foo(loader, data): return loader.represent_scalar(u'#', data) def construct_yaml_binary(loader, node): """ Overrides pyaml's constructor for binary data. Wraps binary data in xmlrpclib.Binary container instead of straight string representation. 
""" return Binary(loader.construct_yaml_binary(node)) # register the (de)serializers with pyyaml yaml.add_representer(Binary,represent_xml_binary) yaml.add_constructor(u'tag:yaml.org,2002:binary', construct_yaml_binary) def construct_angle_radians(loader, node): """ python-yaml utility for converting rad(num) into float value """ value = loader.construct_scalar(node).strip() exprvalue = value.replace('pi', 'math.pi') if exprvalue.startswith("rad("): exprvalue = exprvalue[4:-1] try: return float(eval(exprvalue)) except SyntaxError as e: raise RosParamException("invalid radian expression: %s"%value) def construct_angle_degrees(loader, node): """ python-yaml utility for converting deg(num) into float value """ value = loader.construct_scalar(node) exprvalue = value if exprvalue.startswith("deg("): exprvalue = exprvalue.strip()[4:-1] try: return float(exprvalue) * math.pi / 180.0 except ValueError: raise RosParamException("invalid degree value: %s"%value) # utilities def _get_caller_id(): """ :returns: caller ID for rosparam ROS client calls, ``str`` """ return make_caller_id('rosparam-%s'%os.getpid()) def print_params(params, ns): """ Print contents of param dictionary to screen """ if type(params) == dict: for k, v in params.items(): if type(v) == dict: print_params(v, ns_join(ns, k)) else: print("%s=%s"%(ns_join(ns, k), v)) else: print(params) # yaml processing def load_file(filename, default_namespace=None, verbose=False): """ Load the YAML document from the specified file :param filename: name of filename, ``str`` :param default_namespace: namespace to load filename into, ``str`` :returns [(dict, str)...]: list of parameter dictionary and corresponding namespaces for each YAML document in the file :raises: :exc:`RosParamException`: if unable to load contents of filename """ if not filename or filename == '-': f = sys.stdin if verbose: print("reading parameters from stdin") return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) 
else: if not os.path.isfile(filename): raise RosParamException("file [%s] does not exist"%filename) if verbose: print("reading parameters from [%s]"%filename) with open(filename, 'r') as f: return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) def load_str(str, filename, default_namespace=None, verbose=False): """ Load the YAML document as a string :param filename: name of filename, only used for debugging, ``str`` :param default_namespace: namespace to load filename into, ``str`` :param str: YAML text, ``str`` :returns: list of parameter dictionary and corresponding namespaces for each YAML document in the file, ``[(dict, str)...]`` """ paramlist = [] default_namespace = default_namespace or get_ros_namespace() for doc in yaml.load_all(str): if NS in doc: ns = ns_join(default_namespace, doc.get(NS, None)) if verbose: print("reading parameters into namespace [%s]"%ns) del doc[NS] else: ns = default_namespace paramlist.append((doc, ns)) return paramlist # DUMP/GET def get_param_server(): return rosgraph.Master(_get_caller_id()) def get_param(param): """ Download a parameter from Parameter Server :param param: parameter name to retrieve from parameter server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ try: return get_param_server().getParam(param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # #698 def _pretty_print(value, indent=''): """ Pretty print get value :param value: value to print :param indent: indent level, used for recursive calls, ``str`` """ keys = list(value.keys()) keys.sort() for k in keys: v = value[k] if type(v) == dict: print("%s%s:"%(indent, k)) _pretty_print(v, indent+' ') elif type(v) == str: if '\n' in v: print(indent+'%s: |'%k) for l in v.split('\n'): print(indent+' '+l) else: print("%s%s: %s"%(indent, k, v)) else: dump = yaml.dump(v) # #1617 # newer versions of python-yaml append the '...' document end # syntax. 
as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-4] sys.stdout.write("%s%s: %s"%(indent, k, dump)) def _rosparam_cmd_get_param(param, pretty=False, verbose=False): """ Download a parameter tree and print to screen :param param: parameter name to retrieve from Parameter Server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ # yaml.dump has a \n at the end, so use stdout.write instead of print if verbose: print("getting parameter [%s]"%param) try: val = get_param(param) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) if pretty and type(val) in [dict, str]: if type(val) == dict: _pretty_print(val) else:
# #1617 # newer versions of python-yaml append the '...' document end # syntax. as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-5] # #3761 add newline in output sys.stdout.write("%s\n"%(dump)) def dump_params(filename, param, verbose=False): """ Download a parameter tree from the Parameter Server and store in a yaml file :param filename: name of file to save YAML representation, ``str`` :param param: name of parameter/namespace to dump, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ tree = get_param(param) if verbose: print_params(tree, param) if not filename: f = sys.stdout yaml.dump(tree, f) else: f = open(filename, 'w') try: yaml.dump(tree, f) finally: f.close() def delete_param(param, verbose=False): """ Delete a parameter from the Parameter Server :param param: parameter name, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ try: if param == GLOBALNS: # not allowed to delete the root of the tree as it must always # have a value. the equivalent command is setting the root to an # empty dictionary get_param_server().setParam(GLOBALNS, {}) if verbose: print("deleted ENTIRE parameter server") else: get_param_server().deleteParam(param) if verbose: print("deleted parameter [%s]"%param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # LOAD/SET def set_param_raw(param, value, verbose=False): """ Set param on the Parameter Server. Unlike L{set_param()}, this takes in a Python value to set instead of YAML. :param param: parameter name, ``str`` :param value XmlRpcLegalValue: value to upload, ``XmlRpcLegalValue`` """ if type(value) == dict: # #1098 changing dictionary behavior to be an update, rather # than replace behavior. 
for k, v in value.items(): # dictionary keys must be non-unicode strings if isinstance(k, str): set_param_raw(ns_join(param, k), v, verbose=verbose) else: raise RosParamException("YAML dictionaries must have string keys. Invalid dictionary is:\n%s"%value) else: try: expected_type = long except NameError : expected_type = int if type(value) == expected_type: if value > sys.maxsize: raise RosParamException("Overflow: Parameter Server integers must be 32-bit signed integers:\n\t-%s <= value <= %s"%(maxint - 1, maxint)) try: get_param_server().setParam(param, value) except socket.error: raise RosParamIOException("Unable to communicate with master!") if verbose: print("set parameter [%s] to [%s]"%(param, value)) def set_param(param, value, verbose=False): """ Set param on the ROS parameter server using a YAML value. :param param: parameter name, ``str`` :param value: yaml-encoded value, ``str`` """ set_param_raw(param, yaml.load(value), verbose=verbose) def upload_params(ns, values, verbose=False): """ Upload params to the Parameter Server :param values: key/value dictionary, where keys are parameter names and values are parameter values, ``dict`` :param ns: namespace to load parameters into, ``str`` """ if ns == '/' and not type(values) == dict: raise RosParamException("global / can only be set to a dictionary") if verbose: print_params(values, ns) set_param_raw(ns, values) # LIST def list_params(ns): """ Get list of parameters in ns :param ns: namespace to match, ``str`` """ try: ns = make_global_ns(ns) names = get_param_server().getParamNames() names.sort() return [n for n in names if n.startswith(ns)] except socket.error: raise RosParamIOException("Unable to communicate with master!") # COMMAND-LINE PARSING def _rosparam_cmd_get_dump(cmd, argv): """ Process command line for rosparam get/dump, e.g.:: rosparam get param rosparam dump file.yaml [namespace] :param cmd: command ('get' or 'dump'), ``str`` :param argv: command-line args, ``str`` """ # get and dump are 
equivalent functionality, just different arguments if cmd == 'dump': parser = OptionParser(usage="usage: %prog dump [options] file [namespace]", prog=NAME) elif cmd == 'get': parser = OptionParser(usage="usage: %prog get [options] parameter", prog=NAME) parser.add_option("-p", dest="pretty", default=False, action="store_true", help="pretty print. WARNING: not YAML-safe") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg = None ns = '' if len(args) == 0: if cmd == 'get': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] elif len(args) == 2 and cmd == 'dump': arg = args[0] ns = args[1] else: parser.error("too many arguments") if cmd == 'get': _rosparam_cmd_get_param(script_resolve_name(NAME, arg), pretty=options.pretty, verbose=options.verbose) else: if options.verbose: print("dumping namespace [%s] to file [%s]"%(ns, arg)) dump_params(arg, script_resolve_name(NAME, ns), verbose=options.verbose) def _set_optparse_neg_args(parser, argv): # we don't use optparse to parse actual arguments, just options, # due to the fact that optparse doesn't handle negative numbers as # arguments. This parsing is complicated by the fact that we still # need to respect argument-bearing options like --textfile. 
args = [] optparse_args = [] skip = False for s in argv[2:]: if s.startswith('-'): if s in ['-t', '--textfile', '-b', '--binfile']: skip = True optparse_args.append(s) elif skip: parser.error("-t and --textfile options require an argument") elif len(s) > 1 and ord(s[1]) >= ord('0') and ord(s[1]) <= ord('9'): args.append(s) else: optparse_args.append(s) else: if skip: skip = False optparse_args.append(s) else: args.append(s) options, _ = parser.parse_args(optparse_args) return options, args # TODO: break this into separate routines, has gotten too ugly to multiplex def _rosparam_cmd_set_load(cmd, argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ if cmd == 'load': parser = OptionParser(usage="usage: %prog load [options] file [namespace]", prog=NAME) elif cmd == 'set': parser = OptionParser(usage="usage: %prog set [options] parameter value", prog=NAME) parser.add_option("-t", "--textfile", dest="text_file", default=None, metavar="TEXT_FILE", help="set parameters to contents of text file") parser.add_option("-b", "--binfile", dest="bin_file", default=None, metavar="BINARY_FILE", help="set parameters to contents of binary file") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") if cmd == 'set': options, args = _set_optparse_neg_args(parser, argv) if options.text_file and options.bin_file: parser.error("you may only specify one of --textfile or --binfile") else: options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: if cmd == 'load': parser.error("invalid arguments. Please specify a file name or - for stdin") elif cmd == 'set': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] if cmd == 'set' and not (options.text_file or options.bin_file): parser.error("invalid arguments. 
Please specify a parameter value") elif len(args) == 2: arg = args[0] arg2 = args[1] else: parser.error("too many arguments") if cmd == 'set': name = script_resolve_name(NAME, arg) # #2647 if options.text_file: if not os.path.isfile(options.text_file): parser.error("file '%s' does not exist"%(options.text_file)) with open(options.text_file) as f: arg2 = f.read() set_param_raw(name, arg2, verbose=options.verbose) elif options.bin_file: with open(options.bin_file, 'rb') as f: arg2 = Binary(f.read()) set_param_raw(name, arg2, verbose=options.verbose) else: # #2237: the empty string is really hard to specify on the # command-line due to bash quoting rules. We cheat here and # let an empty Python string be an empty YAML string (instead # of YAML null, which has no meaning to the Parameter Server # anyway). if arg2 == '': arg2 = '!!str' set_param(name, arg2, verbose=options.verbose) else: paramlist = load_file(arg, default_namespace=script_resolve_name(NAME, arg2), verbose=options.verbose) for params,ns in paramlist: upload_params(ns, params, verbose=options.verbose) def _rosparam_cmd_list(argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog list [namespace]", prog=NAME) options, args = parser.parse_args(argv[2:]) ns = GLOBALNS if len(args) == 1: ns = script_resolve_name(NAME, args[0]) elif len(args) == 2: parser.error("too many arguments") print('\n'.join(list_params(ns))) def _rosparam_cmd_delete(argv): """ Process command line for rosparam delete, e.g.:: rosparam delete param :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog delete [options] parameter", prog=NAME) parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: 
parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] else: parser.error("too many arguments") try: delete_param(script_resolve_name(NAME, arg), verbose=options.verbose) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) def _fullusage(): """ Prints rosparam usage """ print("""rosparam is a command-line tool for getting, setting, and deleting parameters from the ROS Parameter Server. Commands: \trosparam set\tset parameter \trosparam get\tget parameter \trosparam load\tload parameters from file \trosparam dump\tdump parameters to file \trosparam delete\tdelete parameter \trosparam list\tlist parameter names """) sys.exit(0) def yamlmain(argv=None): """ Command-line main routine. Loads in one or more input files :param argv: command-line arguments or None to use sys.argv, ``[str]`` """ if argv is None: argv = sys.argv if len(argv) == 1: _fullusage() try: command = argv[1] if command in ['get', 'dump']: _rosparam_cmd_get_dump(command, argv) elif command in ['set', 'load']: _rosparam_cmd_set_load(command, argv) elif command in ['delete']: _rosparam_cmd_delete(argv) elif command == 'list': _rosparam_cmd_list(argv) else: _fullusage() except RosParamException as e: print("ERROR: "+str(e), file=sys.stderr) sys.exit(1) # YAML configuration. Doxygen does not like these being higher up in the code yaml.add_constructor(u'!radians', construct_angle_radians) yaml.add_constructor(u'!degrees', construct_angle_degrees) # allow both !degrees 180, !radians 2*pi pattern = re.compile(r'^deg\([^\)]*\)$') yaml.add_implicit_resolver(u'!degrees', pattern, first="deg(") pattern = re.compile(r'^rad\([^\)]*\)$') yaml.add_implicit_resolver(u'!radians', pattern, first="rad(")
print(val) else: dump = yaml.dump(val)
random_line_split
__init__.py
# Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Revision $Id: rosparam 1641 2008-07-28 21:39:33Z sfkwc $ """ Implementation of the rosparam as well as a library for modifying the state of the ROS Parameter Server using YAML files. """ from __future__ import print_function NAME = 'rosparam' ## namespace key. Use of this in a YAML document specifies the ## namespace of all the params. NOTE: phasing out most use of this ## key. 
It's still useful in corner cases, but most of its ## functionality can be achieved with command-line arguments. NS = '_ns' import base64 import math import os import re import sys import socket try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary from optparse import OptionParser import yaml import rosgraph from rosgraph.names import script_resolve_name, ns_join, get_ros_namespace, make_caller_id, make_global_ns, GLOBALNS class RosParamException(Exception): """ rosparam base exception type """ pass class RosParamIOException(RosParamException): """ Exception for communication-based (i/o) errors. """ pass # pyyaml customizations for binary and angle data def represent_xml_binary(loader, data): """ Adds a pyyaml serializer to handle xmlrpclib.Binary objects """ data = base64.b64encode(data.data) return loader.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|') def represent_foo(loader, data): return loader.represent_scalar(u'#', data) def construct_yaml_binary(loader, node): """ Overrides pyaml's constructor for binary data. Wraps binary data in xmlrpclib.Binary container instead of straight string representation. 
""" return Binary(loader.construct_yaml_binary(node)) # register the (de)serializers with pyyaml yaml.add_representer(Binary,represent_xml_binary) yaml.add_constructor(u'tag:yaml.org,2002:binary', construct_yaml_binary) def construct_angle_radians(loader, node): """ python-yaml utility for converting rad(num) into float value """ value = loader.construct_scalar(node).strip() exprvalue = value.replace('pi', 'math.pi') if exprvalue.startswith("rad("): exprvalue = exprvalue[4:-1] try: return float(eval(exprvalue)) except SyntaxError as e: raise RosParamException("invalid radian expression: %s"%value) def construct_angle_degrees(loader, node): """ python-yaml utility for converting deg(num) into float value """ value = loader.construct_scalar(node) exprvalue = value if exprvalue.startswith("deg("): exprvalue = exprvalue.strip()[4:-1] try: return float(exprvalue) * math.pi / 180.0 except ValueError: raise RosParamException("invalid degree value: %s"%value) # utilities def _get_caller_id(): """ :returns: caller ID for rosparam ROS client calls, ``str`` """ return make_caller_id('rosparam-%s'%os.getpid()) def
(params, ns): """ Print contents of param dictionary to screen """ if type(params) == dict: for k, v in params.items(): if type(v) == dict: print_params(v, ns_join(ns, k)) else: print("%s=%s"%(ns_join(ns, k), v)) else: print(params) # yaml processing def load_file(filename, default_namespace=None, verbose=False): """ Load the YAML document from the specified file :param filename: name of filename, ``str`` :param default_namespace: namespace to load filename into, ``str`` :returns [(dict, str)...]: list of parameter dictionary and corresponding namespaces for each YAML document in the file :raises: :exc:`RosParamException`: if unable to load contents of filename """ if not filename or filename == '-': f = sys.stdin if verbose: print("reading parameters from stdin") return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) else: if not os.path.isfile(filename): raise RosParamException("file [%s] does not exist"%filename) if verbose: print("reading parameters from [%s]"%filename) with open(filename, 'r') as f: return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose) def load_str(str, filename, default_namespace=None, verbose=False): """ Load the YAML document as a string :param filename: name of filename, only used for debugging, ``str`` :param default_namespace: namespace to load filename into, ``str`` :param str: YAML text, ``str`` :returns: list of parameter dictionary and corresponding namespaces for each YAML document in the file, ``[(dict, str)...]`` """ paramlist = [] default_namespace = default_namespace or get_ros_namespace() for doc in yaml.load_all(str): if NS in doc: ns = ns_join(default_namespace, doc.get(NS, None)) if verbose: print("reading parameters into namespace [%s]"%ns) del doc[NS] else: ns = default_namespace paramlist.append((doc, ns)) return paramlist # DUMP/GET def get_param_server(): return rosgraph.Master(_get_caller_id()) def get_param(param): """ Download a parameter from 
Parameter Server :param param: parameter name to retrieve from parameter server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ try: return get_param_server().getParam(param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # #698 def _pretty_print(value, indent=''): """ Pretty print get value :param value: value to print :param indent: indent level, used for recursive calls, ``str`` """ keys = list(value.keys()) keys.sort() for k in keys: v = value[k] if type(v) == dict: print("%s%s:"%(indent, k)) _pretty_print(v, indent+' ') elif type(v) == str: if '\n' in v: print(indent+'%s: |'%k) for l in v.split('\n'): print(indent+' '+l) else: print("%s%s: %s"%(indent, k, v)) else: dump = yaml.dump(v) # #1617 # newer versions of python-yaml append the '...' document end # syntax. as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-4] sys.stdout.write("%s%s: %s"%(indent, k, dump)) def _rosparam_cmd_get_param(param, pretty=False, verbose=False): """ Download a parameter tree and print to screen :param param: parameter name to retrieve from Parameter Server. If param is a parameter namespace, entire parameter subtree will be downloaded, ``str`` """ # yaml.dump has a \n at the end, so use stdout.write instead of print if verbose: print("getting parameter [%s]"%param) try: val = get_param(param) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) if pretty and type(val) in [dict, str]: if type(val) == dict: _pretty_print(val) else: print(val) else: dump = yaml.dump(val) # #1617 # newer versions of python-yaml append the '...' document end # syntax. 
as YAML functions fine w/o it, and as it is # confusing to users who are just getting a single scalar, we # strip it if dump.endswith('\n...\n'): dump = dump[:-5] # #3761 add newline in output sys.stdout.write("%s\n"%(dump)) def dump_params(filename, param, verbose=False): """ Download a parameter tree from the Parameter Server and store in a yaml file :param filename: name of file to save YAML representation, ``str`` :param param: name of parameter/namespace to dump, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ tree = get_param(param) if verbose: print_params(tree, param) if not filename: f = sys.stdout yaml.dump(tree, f) else: f = open(filename, 'w') try: yaml.dump(tree, f) finally: f.close() def delete_param(param, verbose=False): """ Delete a parameter from the Parameter Server :param param: parameter name, ``str`` :param verbose: print verbose output for debugging, ``bool`` """ try: if param == GLOBALNS: # not allowed to delete the root of the tree as it must always # have a value. the equivalent command is setting the root to an # empty dictionary get_param_server().setParam(GLOBALNS, {}) if verbose: print("deleted ENTIRE parameter server") else: get_param_server().deleteParam(param) if verbose: print("deleted parameter [%s]"%param) except socket.error: raise RosParamIOException("Unable to communicate with master!") # LOAD/SET def set_param_raw(param, value, verbose=False): """ Set param on the Parameter Server. Unlike L{set_param()}, this takes in a Python value to set instead of YAML. :param param: parameter name, ``str`` :param value XmlRpcLegalValue: value to upload, ``XmlRpcLegalValue`` """ if type(value) == dict: # #1098 changing dictionary behavior to be an update, rather # than replace behavior. for k, v in value.items(): # dictionary keys must be non-unicode strings if isinstance(k, str): set_param_raw(ns_join(param, k), v, verbose=verbose) else: raise RosParamException("YAML dictionaries must have string keys. 
Invalid dictionary is:\n%s"%value) else: try: expected_type = long except NameError : expected_type = int if type(value) == expected_type: if value > sys.maxsize: raise RosParamException("Overflow: Parameter Server integers must be 32-bit signed integers:\n\t-%s <= value <= %s"%(maxint - 1, maxint)) try: get_param_server().setParam(param, value) except socket.error: raise RosParamIOException("Unable to communicate with master!") if verbose: print("set parameter [%s] to [%s]"%(param, value)) def set_param(param, value, verbose=False): """ Set param on the ROS parameter server using a YAML value. :param param: parameter name, ``str`` :param value: yaml-encoded value, ``str`` """ set_param_raw(param, yaml.load(value), verbose=verbose) def upload_params(ns, values, verbose=False): """ Upload params to the Parameter Server :param values: key/value dictionary, where keys are parameter names and values are parameter values, ``dict`` :param ns: namespace to load parameters into, ``str`` """ if ns == '/' and not type(values) == dict: raise RosParamException("global / can only be set to a dictionary") if verbose: print_params(values, ns) set_param_raw(ns, values) # LIST def list_params(ns): """ Get list of parameters in ns :param ns: namespace to match, ``str`` """ try: ns = make_global_ns(ns) names = get_param_server().getParamNames() names.sort() return [n for n in names if n.startswith(ns)] except socket.error: raise RosParamIOException("Unable to communicate with master!") # COMMAND-LINE PARSING def _rosparam_cmd_get_dump(cmd, argv): """ Process command line for rosparam get/dump, e.g.:: rosparam get param rosparam dump file.yaml [namespace] :param cmd: command ('get' or 'dump'), ``str`` :param argv: command-line args, ``str`` """ # get and dump are equivalent functionality, just different arguments if cmd == 'dump': parser = OptionParser(usage="usage: %prog dump [options] file [namespace]", prog=NAME) elif cmd == 'get': parser = OptionParser(usage="usage: %prog get 
[options] parameter", prog=NAME) parser.add_option("-p", dest="pretty", default=False, action="store_true", help="pretty print. WARNING: not YAML-safe") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg = None ns = '' if len(args) == 0: if cmd == 'get': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] elif len(args) == 2 and cmd == 'dump': arg = args[0] ns = args[1] else: parser.error("too many arguments") if cmd == 'get': _rosparam_cmd_get_param(script_resolve_name(NAME, arg), pretty=options.pretty, verbose=options.verbose) else: if options.verbose: print("dumping namespace [%s] to file [%s]"%(ns, arg)) dump_params(arg, script_resolve_name(NAME, ns), verbose=options.verbose) def _set_optparse_neg_args(parser, argv): # we don't use optparse to parse actual arguments, just options, # due to the fact that optparse doesn't handle negative numbers as # arguments. This parsing is complicated by the fact that we still # need to respect argument-bearing options like --textfile. 
args = [] optparse_args = [] skip = False for s in argv[2:]: if s.startswith('-'): if s in ['-t', '--textfile', '-b', '--binfile']: skip = True optparse_args.append(s) elif skip: parser.error("-t and --textfile options require an argument") elif len(s) > 1 and ord(s[1]) >= ord('0') and ord(s[1]) <= ord('9'): args.append(s) else: optparse_args.append(s) else: if skip: skip = False optparse_args.append(s) else: args.append(s) options, _ = parser.parse_args(optparse_args) return options, args # TODO: break this into separate routines, has gotten too ugly to multiplex def _rosparam_cmd_set_load(cmd, argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ if cmd == 'load': parser = OptionParser(usage="usage: %prog load [options] file [namespace]", prog=NAME) elif cmd == 'set': parser = OptionParser(usage="usage: %prog set [options] parameter value", prog=NAME) parser.add_option("-t", "--textfile", dest="text_file", default=None, metavar="TEXT_FILE", help="set parameters to contents of text file") parser.add_option("-b", "--binfile", dest="bin_file", default=None, metavar="BINARY_FILE", help="set parameters to contents of binary file") parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") if cmd == 'set': options, args = _set_optparse_neg_args(parser, argv) if options.text_file and options.bin_file: parser.error("you may only specify one of --textfile or --binfile") else: options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: if cmd == 'load': parser.error("invalid arguments. Please specify a file name or - for stdin") elif cmd == 'set': parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] if cmd == 'set' and not (options.text_file or options.bin_file): parser.error("invalid arguments. 
Please specify a parameter value") elif len(args) == 2: arg = args[0] arg2 = args[1] else: parser.error("too many arguments") if cmd == 'set': name = script_resolve_name(NAME, arg) # #2647 if options.text_file: if not os.path.isfile(options.text_file): parser.error("file '%s' does not exist"%(options.text_file)) with open(options.text_file) as f: arg2 = f.read() set_param_raw(name, arg2, verbose=options.verbose) elif options.bin_file: with open(options.bin_file, 'rb') as f: arg2 = Binary(f.read()) set_param_raw(name, arg2, verbose=options.verbose) else: # #2237: the empty string is really hard to specify on the # command-line due to bash quoting rules. We cheat here and # let an empty Python string be an empty YAML string (instead # of YAML null, which has no meaning to the Parameter Server # anyway). if arg2 == '': arg2 = '!!str' set_param(name, arg2, verbose=options.verbose) else: paramlist = load_file(arg, default_namespace=script_resolve_name(NAME, arg2), verbose=options.verbose) for params,ns in paramlist: upload_params(ns, params, verbose=options.verbose) def _rosparam_cmd_list(argv): """ Process command line for rosparam set/load, e.g.:: rosparam load file.yaml [namespace] rosparam set param value :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog list [namespace]", prog=NAME) options, args = parser.parse_args(argv[2:]) ns = GLOBALNS if len(args) == 1: ns = script_resolve_name(NAME, args[0]) elif len(args) == 2: parser.error("too many arguments") print('\n'.join(list_params(ns))) def _rosparam_cmd_delete(argv): """ Process command line for rosparam delete, e.g.:: rosparam delete param :param cmd: command name, ``str`` :param argv: command-line args, ``str`` """ parser = OptionParser(usage="usage: %prog delete [options] parameter", prog=NAME) parser.add_option("-v", dest="verbose", default=False, action="store_true", help="turn on verbose output") options, args = parser.parse_args(argv[2:]) arg2 = None if len(args) == 0: 
parser.error("invalid arguments. Please specify a parameter name") elif len(args) == 1: arg = args[0] else: parser.error("too many arguments") try: delete_param(script_resolve_name(NAME, arg), verbose=options.verbose) except rosgraph.masterapi.Error as e: raise RosParamException(str(e)) def _fullusage(): """ Prints rosparam usage """ print("""rosparam is a command-line tool for getting, setting, and deleting parameters from the ROS Parameter Server. Commands: \trosparam set\tset parameter \trosparam get\tget parameter \trosparam load\tload parameters from file \trosparam dump\tdump parameters to file \trosparam delete\tdelete parameter \trosparam list\tlist parameter names """) sys.exit(0) def yamlmain(argv=None): """ Command-line main routine. Loads in one or more input files :param argv: command-line arguments or None to use sys.argv, ``[str]`` """ if argv is None: argv = sys.argv if len(argv) == 1: _fullusage() try: command = argv[1] if command in ['get', 'dump']: _rosparam_cmd_get_dump(command, argv) elif command in ['set', 'load']: _rosparam_cmd_set_load(command, argv) elif command in ['delete']: _rosparam_cmd_delete(argv) elif command == 'list': _rosparam_cmd_list(argv) else: _fullusage() except RosParamException as e: print("ERROR: "+str(e), file=sys.stderr) sys.exit(1) # YAML configuration. Doxygen does not like these being higher up in the code yaml.add_constructor(u'!radians', construct_angle_radians) yaml.add_constructor(u'!degrees', construct_angle_degrees) # allow both !degrees 180, !radians 2*pi pattern = re.compile(r'^deg\([^\)]*\)$') yaml.add_implicit_resolver(u'!degrees', pattern, first="deg(") pattern = re.compile(r'^rad\([^\)]*\)$') yaml.add_implicit_resolver(u'!radians', pattern, first="rad(")
print_params
identifier_name
methods.py
""" This module defines all possible functional forms of harmonization methods and the default decision tree for choosing which method to use. """ from bisect import bisect import numpy as np import pandas as pd import pyomo.environ as pyo from aneris import utils def harmonize_factors(df, hist, harmonize_year="2015"): """ Calculate offset and ratio values between data and history. Parameters ---------- df : pd.DataFrame model data hist : pd.DataFrame historical data harmonize_year : string, optional column name of harmonization year Returns ------- offset : pd.Series offset (history - model) ratio : pd.Series ratio (history / model) """ c, m = hist[harmonize_year], df[harmonize_year] offset = (c - m).fillna(0) offset.name = "offset" ratios = (c / m).replace(np.inf, np.nan).fillna(0) ratios.name = "ratio" return offset, ratios def constant_offset(df, offset, harmonize_year="2015"): """ Calculate constant offset harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].add(offset, axis=0) return df def constant_ratio(df, ratios, harmonize_year="2015"): """ Calculate constant ratio harmonized trajectory. Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].multiply(ratios, axis=0) return df def linear_interpolate(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate linearly interpolated convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() x1, x2 = harmonize_year, final_year y1, y2 = offset + df[x1], df[x2] m = (y2 - y1) / (float(x2) - float(x1)) b = y1 - m * float(x1) cols = [x for x in utils.numcols(df) if int(x) < int(final_year)] for c in cols: df[c] = m * float(c) + b return df def reduce_offset(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate offset convergence harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0; factors before base year are > 1 f = lambda year: -(year - yi) / float(yf - yi) + 1 factors = [f(year) if year <= yf else 0.0 for year in numcols_int] # add existing values to offset time series offsets = pd.DataFrame( np.outer(offset, factors), columns=numcols, index=offset.index ) df[numcols] = df[numcols] + offsets return df def reduce_ratio(df, ratios, final_year="2050", harmonize_year="2015"): """ Calculate ratio convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0, but replace with 1s in years prior # to harmonization f = lambda year: -(year - yi) / float(yf - yi) + 1 prefactors = [f(yi) for year in numcols_int if year < yi] postfactors = [f(year) if year <= yf else 0.0 for year in numcols_int if year >= yi] factors = prefactors + postfactors # multiply existing values by ratio time series ratios = ( pd.DataFrame(np.outer(ratios - 1, factors), columns=numcols, index=ratios.index) + 1 ) df[numcols] = df[numcols] * ratios return df def budget(df, df_hist, harmonize_year="2015"): r""" Calculate budget harmonized trajectory. Parameters ---------- df : pd.DataFrame model data df_hist : pd.DataFrame historic data harmonize_year : string, optional column name of harmonization year Returns ------- df_harm : pd.DataFrame harmonized trajectories Notes ----- Finds an emissions trajectory consistent with a provided historical emissions timeseries that closely matches a modeled result, while maintaining the overall carbon budget. An optimization problem is constructed and solved by IPOPT, which minimizes the difference between the rate of change of the model and the harmonized model in each year, while 1. preserving the carbon budget of the model, and 2. being consistent with the historical value. With years :math:`y_i`, model results :math:`m_i`, harmonized results :math:`x_i`, historical value :math:`h_0` and a remaining carbon budget :math:`B`, the optimization problem can be formulated as .. 
math:: \min_{x_i} \sum_{i \in |I - 1|} \big( \frac{m_{i+1} - m_i}{y_{i + 1} - y_{i}} - \frac{x_{i+1} - x_i}{y_{i + 1} - y_{i}} \big)^2 s.t. .. math:: \sum_{i} (y_{i + 1} - y_{i}) \big( x_i + 0.5 (x_{i+1} - x_i) \big) = B \quad \text{(carbon budget preservation)} and .. math:: x_0 = h_0 \quad \text{(consistency with historical values)} """ harmonize_year = int(harmonize_year) df = df.set_axis(df.columns.astype(int), axis="columns") df_hist = df_hist.set_axis(df_hist.columns.astype(int), axis="columns") data_years = df.columns hist_years = df_hist.columns years = data_years[data_years >= harmonize_year] if data_years[0] not in hist_years: hist_years = hist_years.insert(bisect(hist_years, data_years[0]), data_years[0]) df_hist = df_hist.reindex(columns=hist_years).interpolate( method="slinear", axis=1 ) def carbon_budget(years, emissions): # trapezoid rule dyears = np.diff(years) demissions = np.diff(emissions) budget = (dyears * (np.asarray(emissions)[:-1] + demissions / 2)).sum() return budget solver = pyo.SolverFactory("ipopt") if solver.executable() is None: raise RuntimeError( "No executable for the solver 'ipopt' found " "(necessary for the budget harmonization). " "Install from conda-forge or add to PATH." ) harmonized = [] for region in df.index: model = pyo.ConcreteModel() """ PARAMETERS """ data_vals = df.loc[region, years] hist_val = df_hist.loc[region, harmonize_year] budget_val = carbon_budget(data_years, df.loc[region, :]) if data_years[0] < harmonize_year: hist_in_overlap = df_hist.loc[region, data_years[0] : harmonize_year] budget_val -= carbon_budget(hist_in_overlap.index, hist_in_overlap) """ VARIABLES """ model.x = pyo.Var(years, initialize=0, domain=pyo.Reals) x = np.array( [model.x[y] for y in years] ) # keeps pyomo VarData objects, ie. 
modelling vars not numbers """ OBJECTIVE FUNCTION """ delta_years = np.diff(years) delta_x = np.diff(x) delta_m = np.diff(data_vals) def l2_norm(): return pyo.quicksum((delta_m / delta_years - delta_x / delta_years) ** 2) model.obj = pyo.Objective(expr=l2_norm(), sense=pyo.minimize) """ CONSTRAINTS """ model.hist_val = pyo.Constraint(expr=model.x[harmonize_year] == hist_val) model.budget = pyo.Constraint(expr=carbon_budget(years, x) == budget_val) """ RUN """ results = solver.solve(model) assert (results.solver.status == pyo.SolverStatus.ok) and ( results.solver.termination_condition == pyo.TerminationCondition.optimal ), ( f"ipopt terminated budget optimization with status: " f"{results.solver.status}, {results.solver.termination_condition}" ) harmonized.append([pyo.value(model.x[y]) for y in years]) df_harm = pd.DataFrame( harmonized, index=df.index, columns=years.astype(str), ) return df_harm def model_zero(df, offset, harmonize_year="2015"): """ Returns result of aneris.methods.constant_offset() """ # current decision is to return a simple offset, this will be a straight # line for all time periods. previous behavior was to set df[numcols] = 0, # i.e., report 0 if model reports 0. return constant_offset(df, offset) def hist_zero(df, *args, **kwargs): """ Returns df (no change) """ # TODO: should this set values to 0? df = df.copy() return df def coeff_of_var(s): """ Returns coefficient of variation of a Series. .. math:: c_v = \\frac{\\sigma(s^{\\prime}(t))}{\\mu(s^{\\prime}(t))} Parameters ---------- s : pd.Series timeseries Returns ------- c_v : float coefficient of variation """ x = np.diff(s.values) return np.abs(np.std(x) / np.mean(x)) def default_method_choice( row, ratio_method, offset_method, luc_method, luc_cov_threshold ): """ Default decision tree as documented at. 
Refer to choice flow chart at https://drive.google.com/drive/folders/0B6_Oqvcg8eP9QXVKX2lFVUJiZHc for arguments available in row and their definition """ # special cases if row.h == 0: return "hist_zero" if row.zero_m: return "model_zero" if np.isinf(row.f) and row.neg_m and row.pos_m: # model == 0 in base year, and model goes negative # and positive return "unicorn" # this shouldn't exist! # model 0 in base year? if np.isclose(row.m, 0): # goes negative? if row.neg_m: return offset_method else: return "constant_offset" else: # is this co2? # ZN: This gas dependence isn't documented in the default # decision tree if hasattr(row, "gas") and row.gas == "CO2": return ratio_method # is cov big? if np.isfinite(row["cov"]) and row["cov"] > luc_cov_threshold: return luc_method else: # dH small? if row.dH < 0.5: return ratio_method else: # goes negative? if row.neg_m: return "reduce_ratio_2100" else: return "constant_ratio" def default_methods(hist, model, base_year, method_choice=None, **kwargs): """ Determine default harmonization methods to use. See http://mattgidden.com/aneris/theory.html#default-decision-tree for a graphical description of the decision tree. 
Parameters ---------- hist : pd.DataFrame historical data model : pd.DataFrame model data base_year : string, int harmonization year method_choice : function, optional codified decision tree, see `default_method_choice` function **kwargs : Additional parameters passed on to the choice function: ratio_method : string, optional method to use for ratio harmonization, default: reduce_ratio_2080 offset_method : string, optional method to use for offset harmonization, default: reduce_offset_2080 luc_method : string, optional method to use for high coefficient of variation, reduce_offset_2150_cov luc_cov_threshold : float cov threshold above which to use `luc_method` Returns ------- methods : pd.Series default harmonization methods metadata : pd.DataFrame metadata regarding why each method was chosen See also -------- `default_method_choice` """ if kwargs.get("ratio_method") is None:
if kwargs.get("offset_method") is None: kwargs["offset_method"] = "reduce_offset_2080" if kwargs.get("luc_method") is None: kwargs["luc_method"] = "reduce_offset_2150_cov" if kwargs.get("luc_cov_threshold") is None: kwargs["luc_cov_threshold"] = 10 y = str(base_year) try: h = hist[base_year] m = model[base_year] except KeyError: h = hist[y] m = model[y] dH = (h - m).abs() / h f = h / m dM = (model.max(axis=1) - model.min(axis=1)).abs() / model.max(axis=1) neg_m = (model < 0).any(axis=1) pos_m = (model > 0).any(axis=1) zero_m = (model == 0).all(axis=1) go_neg = ((model.min(axis=1) - h) < 0).any() cov = hist.apply(coeff_of_var, axis=1) df = pd.DataFrame( { "dH": dH, "f": f, "dM": dM, "neg_m": neg_m, "pos_m": pos_m, "zero_m": zero_m, "go_neg": go_neg, "cov": cov, "h": h, "m": m, } ).join(model.index.to_frame()) if method_choice is None: method_choice = default_method_choice ret = df.apply(method_choice, axis=1, **kwargs) ret.name = "method" return ret, df
kwargs["ratio_method"] = "reduce_ratio_2080"
conditional_block
methods.py
""" This module defines all possible functional forms of harmonization methods and the default decision tree for choosing which method to use. """ from bisect import bisect import numpy as np import pandas as pd import pyomo.environ as pyo from aneris import utils def harmonize_factors(df, hist, harmonize_year="2015"): """ Calculate offset and ratio values between data and history. Parameters ---------- df : pd.DataFrame model data hist : pd.DataFrame historical data harmonize_year : string, optional column name of harmonization year Returns ------- offset : pd.Series offset (history - model) ratio : pd.Series ratio (history / model) """ c, m = hist[harmonize_year], df[harmonize_year] offset = (c - m).fillna(0) offset.name = "offset" ratios = (c / m).replace(np.inf, np.nan).fillna(0) ratios.name = "ratio" return offset, ratios def constant_offset(df, offset, harmonize_year="2015"): """ Calculate constant offset harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].add(offset, axis=0) return df def constant_ratio(df, ratios, harmonize_year="2015"): """ Calculate constant ratio harmonized trajectory. Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].multiply(ratios, axis=0) return df def linear_interpolate(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate linearly interpolated convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() x1, x2 = harmonize_year, final_year y1, y2 = offset + df[x1], df[x2] m = (y2 - y1) / (float(x2) - float(x1)) b = y1 - m * float(x1) cols = [x for x in utils.numcols(df) if int(x) < int(final_year)] for c in cols: df[c] = m * float(c) + b return df def reduce_offset(df, offset, final_year="2050", harmonize_year="2015"):
def reduce_ratio(df, ratios, final_year="2050", harmonize_year="2015"): """ Calculate ratio convergence harmonized trajectory. Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0, but replace with 1s in years prior # to harmonization f = lambda year: -(year - yi) / float(yf - yi) + 1 prefactors = [f(yi) for year in numcols_int if year < yi] postfactors = [f(year) if year <= yf else 0.0 for year in numcols_int if year >= yi] factors = prefactors + postfactors # multiply existing values by ratio time series ratios = ( pd.DataFrame(np.outer(ratios - 1, factors), columns=numcols, index=ratios.index) + 1 ) df[numcols] = df[numcols] * ratios return df def budget(df, df_hist, harmonize_year="2015"): r""" Calculate budget harmonized trajectory. Parameters ---------- df : pd.DataFrame model data df_hist : pd.DataFrame historic data harmonize_year : string, optional column name of harmonization year Returns ------- df_harm : pd.DataFrame harmonized trajectories Notes ----- Finds an emissions trajectory consistent with a provided historical emissions timeseries that closely matches a modeled result, while maintaining the overall carbon budget. An optimization problem is constructed and solved by IPOPT, which minimizes the difference between the rate of change of the model and the harmonized model in each year, while 1. preserving the carbon budget of the model, and 2. being consistent with the historical value. 
With years :math:`y_i`, model results :math:`m_i`, harmonized results :math:`x_i`, historical value :math:`h_0` and a remaining carbon budget :math:`B`, the optimization problem can be formulated as .. math:: \min_{x_i} \sum_{i \in |I - 1|} \big( \frac{m_{i+1} - m_i}{y_{i + 1} - y_{i}} - \frac{x_{i+1} - x_i}{y_{i + 1} - y_{i}} \big)^2 s.t. .. math:: \sum_{i} (y_{i + 1} - y_{i}) \big( x_i + 0.5 (x_{i+1} - x_i) \big) = B \quad \text{(carbon budget preservation)} and .. math:: x_0 = h_0 \quad \text{(consistency with historical values)} """ harmonize_year = int(harmonize_year) df = df.set_axis(df.columns.astype(int), axis="columns") df_hist = df_hist.set_axis(df_hist.columns.astype(int), axis="columns") data_years = df.columns hist_years = df_hist.columns years = data_years[data_years >= harmonize_year] if data_years[0] not in hist_years: hist_years = hist_years.insert(bisect(hist_years, data_years[0]), data_years[0]) df_hist = df_hist.reindex(columns=hist_years).interpolate( method="slinear", axis=1 ) def carbon_budget(years, emissions): # trapezoid rule dyears = np.diff(years) demissions = np.diff(emissions) budget = (dyears * (np.asarray(emissions)[:-1] + demissions / 2)).sum() return budget solver = pyo.SolverFactory("ipopt") if solver.executable() is None: raise RuntimeError( "No executable for the solver 'ipopt' found " "(necessary for the budget harmonization). " "Install from conda-forge or add to PATH." ) harmonized = [] for region in df.index: model = pyo.ConcreteModel() """ PARAMETERS """ data_vals = df.loc[region, years] hist_val = df_hist.loc[region, harmonize_year] budget_val = carbon_budget(data_years, df.loc[region, :]) if data_years[0] < harmonize_year: hist_in_overlap = df_hist.loc[region, data_years[0] : harmonize_year] budget_val -= carbon_budget(hist_in_overlap.index, hist_in_overlap) """ VARIABLES """ model.x = pyo.Var(years, initialize=0, domain=pyo.Reals) x = np.array( [model.x[y] for y in years] ) # keeps pyomo VarData objects, ie. 
modelling vars not numbers """ OBJECTIVE FUNCTION """ delta_years = np.diff(years) delta_x = np.diff(x) delta_m = np.diff(data_vals) def l2_norm(): return pyo.quicksum((delta_m / delta_years - delta_x / delta_years) ** 2) model.obj = pyo.Objective(expr=l2_norm(), sense=pyo.minimize) """ CONSTRAINTS """ model.hist_val = pyo.Constraint(expr=model.x[harmonize_year] == hist_val) model.budget = pyo.Constraint(expr=carbon_budget(years, x) == budget_val) """ RUN """ results = solver.solve(model) assert (results.solver.status == pyo.SolverStatus.ok) and ( results.solver.termination_condition == pyo.TerminationCondition.optimal ), ( f"ipopt terminated budget optimization with status: " f"{results.solver.status}, {results.solver.termination_condition}" ) harmonized.append([pyo.value(model.x[y]) for y in years]) df_harm = pd.DataFrame( harmonized, index=df.index, columns=years.astype(str), ) return df_harm def model_zero(df, offset, harmonize_year="2015"): """ Returns result of aneris.methods.constant_offset() """ # current decision is to return a simple offset, this will be a straight # line for all time periods. previous behavior was to set df[numcols] = 0, # i.e., report 0 if model reports 0. return constant_offset(df, offset) def hist_zero(df, *args, **kwargs): """ Returns df (no change) """ # TODO: should this set values to 0? df = df.copy() return df def coeff_of_var(s): """ Returns coefficient of variation of a Series. .. math:: c_v = \\frac{\\sigma(s^{\\prime}(t))}{\\mu(s^{\\prime}(t))} Parameters ---------- s : pd.Series timeseries Returns ------- c_v : float coefficient of variation """ x = np.diff(s.values) return np.abs(np.std(x) / np.mean(x)) def default_method_choice( row, ratio_method, offset_method, luc_method, luc_cov_threshold ): """ Default decision tree as documented at. 
Refer to choice flow chart at https://drive.google.com/drive/folders/0B6_Oqvcg8eP9QXVKX2lFVUJiZHc for arguments available in row and their definition """ # special cases if row.h == 0: return "hist_zero" if row.zero_m: return "model_zero" if np.isinf(row.f) and row.neg_m and row.pos_m: # model == 0 in base year, and model goes negative # and positive return "unicorn" # this shouldn't exist! # model 0 in base year? if np.isclose(row.m, 0): # goes negative? if row.neg_m: return offset_method else: return "constant_offset" else: # is this co2? # ZN: This gas dependence isn't documented in the default # decision tree if hasattr(row, "gas") and row.gas == "CO2": return ratio_method # is cov big? if np.isfinite(row["cov"]) and row["cov"] > luc_cov_threshold: return luc_method else: # dH small? if row.dH < 0.5: return ratio_method else: # goes negative? if row.neg_m: return "reduce_ratio_2100" else: return "constant_ratio" def default_methods(hist, model, base_year, method_choice=None, **kwargs): """ Determine default harmonization methods to use. See http://mattgidden.com/aneris/theory.html#default-decision-tree for a graphical description of the decision tree. 
Parameters ---------- hist : pd.DataFrame historical data model : pd.DataFrame model data base_year : string, int harmonization year method_choice : function, optional codified decision tree, see `default_method_choice` function **kwargs : Additional parameters passed on to the choice function: ratio_method : string, optional method to use for ratio harmonization, default: reduce_ratio_2080 offset_method : string, optional method to use for offset harmonization, default: reduce_offset_2080 luc_method : string, optional method to use for high coefficient of variation, reduce_offset_2150_cov luc_cov_threshold : float cov threshold above which to use `luc_method` Returns ------- methods : pd.Series default harmonization methods metadata : pd.DataFrame metadata regarding why each method was chosen See also -------- `default_method_choice` """ if kwargs.get("ratio_method") is None: kwargs["ratio_method"] = "reduce_ratio_2080" if kwargs.get("offset_method") is None: kwargs["offset_method"] = "reduce_offset_2080" if kwargs.get("luc_method") is None: kwargs["luc_method"] = "reduce_offset_2150_cov" if kwargs.get("luc_cov_threshold") is None: kwargs["luc_cov_threshold"] = 10 y = str(base_year) try: h = hist[base_year] m = model[base_year] except KeyError: h = hist[y] m = model[y] dH = (h - m).abs() / h f = h / m dM = (model.max(axis=1) - model.min(axis=1)).abs() / model.max(axis=1) neg_m = (model < 0).any(axis=1) pos_m = (model > 0).any(axis=1) zero_m = (model == 0).all(axis=1) go_neg = ((model.min(axis=1) - h) < 0).any() cov = hist.apply(coeff_of_var, axis=1) df = pd.DataFrame( { "dH": dH, "f": f, "dM": dM, "neg_m": neg_m, "pos_m": pos_m, "zero_m": zero_m, "go_neg": go_neg, "cov": cov, "h": h, "m": m, } ).join(model.index.to_frame()) if method_choice is None: method_choice = default_method_choice ret = df.apply(method_choice, axis=1, **kwargs) ret.name = "method" return ret, df
""" Calculate offset convergence harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0; factors before base year are > 1 f = lambda year: -(year - yi) / float(yf - yi) + 1 factors = [f(year) if year <= yf else 0.0 for year in numcols_int] # add existing values to offset time series offsets = pd.DataFrame( np.outer(offset, factors), columns=numcols, index=offset.index ) df[numcols] = df[numcols] + offsets return df
identifier_body
methods.py
""" This module defines all possible functional forms of harmonization methods and the default decision tree for choosing which method to use. """ from bisect import bisect import numpy as np import pandas as pd import pyomo.environ as pyo from aneris import utils def harmonize_factors(df, hist, harmonize_year="2015"): """ Calculate offset and ratio values between data and history. Parameters ---------- df : pd.DataFrame model data hist : pd.DataFrame historical data harmonize_year : string, optional column name of harmonization year Returns ------- offset : pd.Series offset (history - model) ratio : pd.Series ratio (history / model) """ c, m = hist[harmonize_year], df[harmonize_year] offset = (c - m).fillna(0) offset.name = "offset" ratios = (c / m).replace(np.inf, np.nan).fillna(0) ratios.name = "ratio" return offset, ratios def constant_offset(df, offset, harmonize_year="2015"): """ Calculate constant offset harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].add(offset, axis=0) return df def constant_ratio(df, ratios, harmonize_year="2015"): """ Calculate constant ratio harmonized trajectory. Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].multiply(ratios, axis=0) return df def linear_interpolate(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate linearly interpolated convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() x1, x2 = harmonize_year, final_year y1, y2 = offset + df[x1], df[x2] m = (y2 - y1) / (float(x2) - float(x1)) b = y1 - m * float(x1) cols = [x for x in utils.numcols(df) if int(x) < int(final_year)] for c in cols: df[c] = m * float(c) + b return df def reduce_offset(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate offset convergence harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0; factors before base year are > 1 f = lambda year: -(year - yi) / float(yf - yi) + 1 factors = [f(year) if year <= yf else 0.0 for year in numcols_int] # add existing values to offset time series offsets = pd.DataFrame( np.outer(offset, factors), columns=numcols, index=offset.index ) df[numcols] = df[numcols] + offsets return df def reduce_ratio(df, ratios, final_year="2050", harmonize_year="2015"): """ Calculate ratio convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0, but replace with 1s in years prior # to harmonization f = lambda year: -(year - yi) / float(yf - yi) + 1 prefactors = [f(yi) for year in numcols_int if year < yi] postfactors = [f(year) if year <= yf else 0.0 for year in numcols_int if year >= yi] factors = prefactors + postfactors # multiply existing values by ratio time series ratios = ( pd.DataFrame(np.outer(ratios - 1, factors), columns=numcols, index=ratios.index) + 1 ) df[numcols] = df[numcols] * ratios return df def
(df, df_hist, harmonize_year="2015"): r""" Calculate budget harmonized trajectory. Parameters ---------- df : pd.DataFrame model data df_hist : pd.DataFrame historic data harmonize_year : string, optional column name of harmonization year Returns ------- df_harm : pd.DataFrame harmonized trajectories Notes ----- Finds an emissions trajectory consistent with a provided historical emissions timeseries that closely matches a modeled result, while maintaining the overall carbon budget. An optimization problem is constructed and solved by IPOPT, which minimizes the difference between the rate of change of the model and the harmonized model in each year, while 1. preserving the carbon budget of the model, and 2. being consistent with the historical value. With years :math:`y_i`, model results :math:`m_i`, harmonized results :math:`x_i`, historical value :math:`h_0` and a remaining carbon budget :math:`B`, the optimization problem can be formulated as .. math:: \min_{x_i} \sum_{i \in |I - 1|} \big( \frac{m_{i+1} - m_i}{y_{i + 1} - y_{i}} - \frac{x_{i+1} - x_i}{y_{i + 1} - y_{i}} \big)^2 s.t. .. math:: \sum_{i} (y_{i + 1} - y_{i}) \big( x_i + 0.5 (x_{i+1} - x_i) \big) = B \quad \text{(carbon budget preservation)} and .. 
math:: x_0 = h_0 \quad \text{(consistency with historical values)} """ harmonize_year = int(harmonize_year) df = df.set_axis(df.columns.astype(int), axis="columns") df_hist = df_hist.set_axis(df_hist.columns.astype(int), axis="columns") data_years = df.columns hist_years = df_hist.columns years = data_years[data_years >= harmonize_year] if data_years[0] not in hist_years: hist_years = hist_years.insert(bisect(hist_years, data_years[0]), data_years[0]) df_hist = df_hist.reindex(columns=hist_years).interpolate( method="slinear", axis=1 ) def carbon_budget(years, emissions): # trapezoid rule dyears = np.diff(years) demissions = np.diff(emissions) budget = (dyears * (np.asarray(emissions)[:-1] + demissions / 2)).sum() return budget solver = pyo.SolverFactory("ipopt") if solver.executable() is None: raise RuntimeError( "No executable for the solver 'ipopt' found " "(necessary for the budget harmonization). " "Install from conda-forge or add to PATH." ) harmonized = [] for region in df.index: model = pyo.ConcreteModel() """ PARAMETERS """ data_vals = df.loc[region, years] hist_val = df_hist.loc[region, harmonize_year] budget_val = carbon_budget(data_years, df.loc[region, :]) if data_years[0] < harmonize_year: hist_in_overlap = df_hist.loc[region, data_years[0] : harmonize_year] budget_val -= carbon_budget(hist_in_overlap.index, hist_in_overlap) """ VARIABLES """ model.x = pyo.Var(years, initialize=0, domain=pyo.Reals) x = np.array( [model.x[y] for y in years] ) # keeps pyomo VarData objects, ie. 
modelling vars not numbers """ OBJECTIVE FUNCTION """ delta_years = np.diff(years) delta_x = np.diff(x) delta_m = np.diff(data_vals) def l2_norm(): return pyo.quicksum((delta_m / delta_years - delta_x / delta_years) ** 2) model.obj = pyo.Objective(expr=l2_norm(), sense=pyo.minimize) """ CONSTRAINTS """ model.hist_val = pyo.Constraint(expr=model.x[harmonize_year] == hist_val) model.budget = pyo.Constraint(expr=carbon_budget(years, x) == budget_val) """ RUN """ results = solver.solve(model) assert (results.solver.status == pyo.SolverStatus.ok) and ( results.solver.termination_condition == pyo.TerminationCondition.optimal ), ( f"ipopt terminated budget optimization with status: " f"{results.solver.status}, {results.solver.termination_condition}" ) harmonized.append([pyo.value(model.x[y]) for y in years]) df_harm = pd.DataFrame( harmonized, index=df.index, columns=years.astype(str), ) return df_harm def model_zero(df, offset, harmonize_year="2015"): """ Returns result of aneris.methods.constant_offset() """ # current decision is to return a simple offset, this will be a straight # line for all time periods. previous behavior was to set df[numcols] = 0, # i.e., report 0 if model reports 0. return constant_offset(df, offset) def hist_zero(df, *args, **kwargs): """ Returns df (no change) """ # TODO: should this set values to 0? df = df.copy() return df def coeff_of_var(s): """ Returns coefficient of variation of a Series. .. math:: c_v = \\frac{\\sigma(s^{\\prime}(t))}{\\mu(s^{\\prime}(t))} Parameters ---------- s : pd.Series timeseries Returns ------- c_v : float coefficient of variation """ x = np.diff(s.values) return np.abs(np.std(x) / np.mean(x)) def default_method_choice( row, ratio_method, offset_method, luc_method, luc_cov_threshold ): """ Default decision tree as documented at. 
Refer to choice flow chart at https://drive.google.com/drive/folders/0B6_Oqvcg8eP9QXVKX2lFVUJiZHc for arguments available in row and their definition """ # special cases if row.h == 0: return "hist_zero" if row.zero_m: return "model_zero" if np.isinf(row.f) and row.neg_m and row.pos_m: # model == 0 in base year, and model goes negative # and positive return "unicorn" # this shouldn't exist! # model 0 in base year? if np.isclose(row.m, 0): # goes negative? if row.neg_m: return offset_method else: return "constant_offset" else: # is this co2? # ZN: This gas dependence isn't documented in the default # decision tree if hasattr(row, "gas") and row.gas == "CO2": return ratio_method # is cov big? if np.isfinite(row["cov"]) and row["cov"] > luc_cov_threshold: return luc_method else: # dH small? if row.dH < 0.5: return ratio_method else: # goes negative? if row.neg_m: return "reduce_ratio_2100" else: return "constant_ratio" def default_methods(hist, model, base_year, method_choice=None, **kwargs): """ Determine default harmonization methods to use. See http://mattgidden.com/aneris/theory.html#default-decision-tree for a graphical description of the decision tree. 
Parameters ---------- hist : pd.DataFrame historical data model : pd.DataFrame model data base_year : string, int harmonization year method_choice : function, optional codified decision tree, see `default_method_choice` function **kwargs : Additional parameters passed on to the choice function: ratio_method : string, optional method to use for ratio harmonization, default: reduce_ratio_2080 offset_method : string, optional method to use for offset harmonization, default: reduce_offset_2080 luc_method : string, optional method to use for high coefficient of variation, reduce_offset_2150_cov luc_cov_threshold : float cov threshold above which to use `luc_method` Returns ------- methods : pd.Series default harmonization methods metadata : pd.DataFrame metadata regarding why each method was chosen See also -------- `default_method_choice` """ if kwargs.get("ratio_method") is None: kwargs["ratio_method"] = "reduce_ratio_2080" if kwargs.get("offset_method") is None: kwargs["offset_method"] = "reduce_offset_2080" if kwargs.get("luc_method") is None: kwargs["luc_method"] = "reduce_offset_2150_cov" if kwargs.get("luc_cov_threshold") is None: kwargs["luc_cov_threshold"] = 10 y = str(base_year) try: h = hist[base_year] m = model[base_year] except KeyError: h = hist[y] m = model[y] dH = (h - m).abs() / h f = h / m dM = (model.max(axis=1) - model.min(axis=1)).abs() / model.max(axis=1) neg_m = (model < 0).any(axis=1) pos_m = (model > 0).any(axis=1) zero_m = (model == 0).all(axis=1) go_neg = ((model.min(axis=1) - h) < 0).any() cov = hist.apply(coeff_of_var, axis=1) df = pd.DataFrame( { "dH": dH, "f": f, "dM": dM, "neg_m": neg_m, "pos_m": pos_m, "zero_m": zero_m, "go_neg": go_neg, "cov": cov, "h": h, "m": m, } ).join(model.index.to_frame()) if method_choice is None: method_choice = default_method_choice ret = df.apply(method_choice, axis=1, **kwargs) ret.name = "method" return ret, df
budget
identifier_name
methods.py
""" This module defines all possible functional forms of harmonization methods and the default decision tree for choosing which method to use. """ from bisect import bisect import numpy as np import pandas as pd import pyomo.environ as pyo from aneris import utils def harmonize_factors(df, hist, harmonize_year="2015"): """ Calculate offset and ratio values between data and history. Parameters ---------- df : pd.DataFrame model data hist : pd.DataFrame historical data harmonize_year : string, optional column name of harmonization year Returns ------- offset : pd.Series offset (history - model) ratio : pd.Series ratio (history / model) """ c, m = hist[harmonize_year], df[harmonize_year] offset = (c - m).fillna(0) offset.name = "offset" ratios = (c / m).replace(np.inf, np.nan).fillna(0) ratios.name = "ratio" return offset, ratios def constant_offset(df, offset, harmonize_year="2015"): """ Calculate constant offset harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].add(offset, axis=0) return df def constant_ratio(df, ratios, harmonize_year="2015"): """ Calculate constant ratio harmonized trajectory. Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data harmonize_year : string, optional column name of harmonization year, ignored Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() numcols = utils.numcols(df) # just add offset to all values df[numcols] = df[numcols].multiply(ratios, axis=0) return df def linear_interpolate(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate linearly interpolated convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() x1, x2 = harmonize_year, final_year y1, y2 = offset + df[x1], df[x2] m = (y2 - y1) / (float(x2) - float(x1)) b = y1 - m * float(x1) cols = [x for x in utils.numcols(df) if int(x) < int(final_year)] for c in cols: df[c] = m * float(c) + b return df def reduce_offset(df, offset, final_year="2050", harmonize_year="2015"): """ Calculate offset convergence harmonized trajectory. Parameters ---------- df : pd.DataFrame model data offset : pd.DataFrame offset data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0; factors before base year are > 1 f = lambda year: -(year - yi) / float(yf - yi) + 1 factors = [f(year) if year <= yf else 0.0 for year in numcols_int] # add existing values to offset time series offsets = pd.DataFrame( np.outer(offset, factors), columns=numcols, index=offset.index ) df[numcols] = df[numcols] + offsets return df def reduce_ratio(df, ratios, final_year="2050", harmonize_year="2015"): """ Calculate ratio convergence harmonized trajectory. 
Parameters ---------- df : pd.DataFrame model data ratio : pd.DataFrame ratio data final_year : string, optional column name of convergence year harmonize_year : string, optional column name of harmonization year Returns ------- df : pd.DataFrame harmonized trajectories """ df = df.copy() yi, yf = int(harmonize_year), int(final_year) numcols = utils.numcols(df) numcols_int = [int(v) for v in numcols] # get factors that reduce from 1 to 0, but replace with 1s in years prior # to harmonization f = lambda year: -(year - yi) / float(yf - yi) + 1 prefactors = [f(yi) for year in numcols_int if year < yi] postfactors = [f(year) if year <= yf else 0.0 for year in numcols_int if year >= yi] factors = prefactors + postfactors # multiply existing values by ratio time series ratios = (
df[numcols] = df[numcols] * ratios return df def budget(df, df_hist, harmonize_year="2015"): r""" Calculate budget harmonized trajectory. Parameters ---------- df : pd.DataFrame model data df_hist : pd.DataFrame historic data harmonize_year : string, optional column name of harmonization year Returns ------- df_harm : pd.DataFrame harmonized trajectories Notes ----- Finds an emissions trajectory consistent with a provided historical emissions timeseries that closely matches a modeled result, while maintaining the overall carbon budget. An optimization problem is constructed and solved by IPOPT, which minimizes the difference between the rate of change of the model and the harmonized model in each year, while 1. preserving the carbon budget of the model, and 2. being consistent with the historical value. With years :math:`y_i`, model results :math:`m_i`, harmonized results :math:`x_i`, historical value :math:`h_0` and a remaining carbon budget :math:`B`, the optimization problem can be formulated as .. math:: \min_{x_i} \sum_{i \in |I - 1|} \big( \frac{m_{i+1} - m_i}{y_{i + 1} - y_{i}} - \frac{x_{i+1} - x_i}{y_{i + 1} - y_{i}} \big)^2 s.t. .. math:: \sum_{i} (y_{i + 1} - y_{i}) \big( x_i + 0.5 (x_{i+1} - x_i) \big) = B \quad \text{(carbon budget preservation)} and .. 
math:: x_0 = h_0 \quad \text{(consistency with historical values)} """ harmonize_year = int(harmonize_year) df = df.set_axis(df.columns.astype(int), axis="columns") df_hist = df_hist.set_axis(df_hist.columns.astype(int), axis="columns") data_years = df.columns hist_years = df_hist.columns years = data_years[data_years >= harmonize_year] if data_years[0] not in hist_years: hist_years = hist_years.insert(bisect(hist_years, data_years[0]), data_years[0]) df_hist = df_hist.reindex(columns=hist_years).interpolate( method="slinear", axis=1 ) def carbon_budget(years, emissions): # trapezoid rule dyears = np.diff(years) demissions = np.diff(emissions) budget = (dyears * (np.asarray(emissions)[:-1] + demissions / 2)).sum() return budget solver = pyo.SolverFactory("ipopt") if solver.executable() is None: raise RuntimeError( "No executable for the solver 'ipopt' found " "(necessary for the budget harmonization). " "Install from conda-forge or add to PATH." ) harmonized = [] for region in df.index: model = pyo.ConcreteModel() """ PARAMETERS """ data_vals = df.loc[region, years] hist_val = df_hist.loc[region, harmonize_year] budget_val = carbon_budget(data_years, df.loc[region, :]) if data_years[0] < harmonize_year: hist_in_overlap = df_hist.loc[region, data_years[0] : harmonize_year] budget_val -= carbon_budget(hist_in_overlap.index, hist_in_overlap) """ VARIABLES """ model.x = pyo.Var(years, initialize=0, domain=pyo.Reals) x = np.array( [model.x[y] for y in years] ) # keeps pyomo VarData objects, ie. 
modelling vars not numbers """ OBJECTIVE FUNCTION """ delta_years = np.diff(years) delta_x = np.diff(x) delta_m = np.diff(data_vals) def l2_norm(): return pyo.quicksum((delta_m / delta_years - delta_x / delta_years) ** 2) model.obj = pyo.Objective(expr=l2_norm(), sense=pyo.minimize) """ CONSTRAINTS """ model.hist_val = pyo.Constraint(expr=model.x[harmonize_year] == hist_val) model.budget = pyo.Constraint(expr=carbon_budget(years, x) == budget_val) """ RUN """ results = solver.solve(model) assert (results.solver.status == pyo.SolverStatus.ok) and ( results.solver.termination_condition == pyo.TerminationCondition.optimal ), ( f"ipopt terminated budget optimization with status: " f"{results.solver.status}, {results.solver.termination_condition}" ) harmonized.append([pyo.value(model.x[y]) for y in years]) df_harm = pd.DataFrame( harmonized, index=df.index, columns=years.astype(str), ) return df_harm def model_zero(df, offset, harmonize_year="2015"): """ Returns result of aneris.methods.constant_offset() """ # current decision is to return a simple offset, this will be a straight # line for all time periods. previous behavior was to set df[numcols] = 0, # i.e., report 0 if model reports 0. return constant_offset(df, offset) def hist_zero(df, *args, **kwargs): """ Returns df (no change) """ # TODO: should this set values to 0? df = df.copy() return df def coeff_of_var(s): """ Returns coefficient of variation of a Series. .. math:: c_v = \\frac{\\sigma(s^{\\prime}(t))}{\\mu(s^{\\prime}(t))} Parameters ---------- s : pd.Series timeseries Returns ------- c_v : float coefficient of variation """ x = np.diff(s.values) return np.abs(np.std(x) / np.mean(x)) def default_method_choice( row, ratio_method, offset_method, luc_method, luc_cov_threshold ): """ Default decision tree as documented at. 
Refer to choice flow chart at https://drive.google.com/drive/folders/0B6_Oqvcg8eP9QXVKX2lFVUJiZHc for arguments available in row and their definition """ # special cases if row.h == 0: return "hist_zero" if row.zero_m: return "model_zero" if np.isinf(row.f) and row.neg_m and row.pos_m: # model == 0 in base year, and model goes negative # and positive return "unicorn" # this shouldn't exist! # model 0 in base year? if np.isclose(row.m, 0): # goes negative? if row.neg_m: return offset_method else: return "constant_offset" else: # is this co2? # ZN: This gas dependence isn't documented in the default # decision tree if hasattr(row, "gas") and row.gas == "CO2": return ratio_method # is cov big? if np.isfinite(row["cov"]) and row["cov"] > luc_cov_threshold: return luc_method else: # dH small? if row.dH < 0.5: return ratio_method else: # goes negative? if row.neg_m: return "reduce_ratio_2100" else: return "constant_ratio" def default_methods(hist, model, base_year, method_choice=None, **kwargs): """ Determine default harmonization methods to use. See http://mattgidden.com/aneris/theory.html#default-decision-tree for a graphical description of the decision tree. 
Parameters ---------- hist : pd.DataFrame historical data model : pd.DataFrame model data base_year : string, int harmonization year method_choice : function, optional codified decision tree, see `default_method_choice` function **kwargs : Additional parameters passed on to the choice function: ratio_method : string, optional method to use for ratio harmonization, default: reduce_ratio_2080 offset_method : string, optional method to use for offset harmonization, default: reduce_offset_2080 luc_method : string, optional method to use for high coefficient of variation, reduce_offset_2150_cov luc_cov_threshold : float cov threshold above which to use `luc_method` Returns ------- methods : pd.Series default harmonization methods metadata : pd.DataFrame metadata regarding why each method was chosen See also -------- `default_method_choice` """ if kwargs.get("ratio_method") is None: kwargs["ratio_method"] = "reduce_ratio_2080" if kwargs.get("offset_method") is None: kwargs["offset_method"] = "reduce_offset_2080" if kwargs.get("luc_method") is None: kwargs["luc_method"] = "reduce_offset_2150_cov" if kwargs.get("luc_cov_threshold") is None: kwargs["luc_cov_threshold"] = 10 y = str(base_year) try: h = hist[base_year] m = model[base_year] except KeyError: h = hist[y] m = model[y] dH = (h - m).abs() / h f = h / m dM = (model.max(axis=1) - model.min(axis=1)).abs() / model.max(axis=1) neg_m = (model < 0).any(axis=1) pos_m = (model > 0).any(axis=1) zero_m = (model == 0).all(axis=1) go_neg = ((model.min(axis=1) - h) < 0).any() cov = hist.apply(coeff_of_var, axis=1) df = pd.DataFrame( { "dH": dH, "f": f, "dM": dM, "neg_m": neg_m, "pos_m": pos_m, "zero_m": zero_m, "go_neg": go_neg, "cov": cov, "h": h, "m": m, } ).join(model.index.to_frame()) if method_choice is None: method_choice = default_method_choice ret = df.apply(method_choice, axis=1, **kwargs) ret.name = "method" return ret, df
pd.DataFrame(np.outer(ratios - 1, factors), columns=numcols, index=ratios.index) + 1 )
random_line_split
hw.py
import os import json import random from collections import namedtuple import timeit MODE = 'set_and_namedtuple' #'simple' FILE_NAME1 = "winedata_1.json" FILE_NAME2 = "winedata_2.json" #преобразуем словарь в NamedTuple def from_dict_to_namedtuple(dict_): return namedtuple('Wine',dict_.keys())(**dict_) def from_namedtuple_to_dict(wine):
try: with open(out_filename, 'w', encoding=encoding_table) as f: json.dump(out_dict, f, ensure_ascii=False) print(f"файл {out_filename} создался успешно!") except: print("Ошибка при записи выходного файла JSON") def get_most_common_object(dict_): max_ = 0 for key,value in dict_.items(): if value > max_: max_ = value most_common_object = key return most_common_object def main(): if os.path.exists(FILE_NAME1): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME1, 'r', encoding='UTF-8') as f: read_data_1 = json.load(f) else: print(f"{FILE_NAME1} File not found!") if os.path.exists(FILE_NAME2): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME2, 'r', encoding='UTF-8') as f: read_data_2 = json.load(f) else: print(f"{FILE_NAME2} File not found!") if MODE == 'set_and_namedtuple': #--убираем дубликаты моим методом dict_wines = read_data_1 + read_data_2 wines = [] wines = [from_dict_to_namedtuple(dict_wine) for dict_wine in dict_wines] #уничтожение дубликатов wines = list(set(wines)) wines = [from_namedtuple_to_dict(wine) for wine in wines] else: #--убираем дубликаты методом вложенных циклов # read_data_1_new = [] # for dict_wine2 in read_data_1: # for dict_wine1 in read_data_1: # if dict_wine1 == dict_wine2: # break # else: # read_data_1_new.append(dict_wine2) read_data_2_new = [] for dict_wine2 in read_data_2: for dict_wine1 in read_data_2: if dict_wine1 == dict_wine2: break else: read_data_2_new.append(dict_wine2) read_data_1_new = read_data_1 for dict_wine2 in read_data_2_new: for dict_wine1 in read_data_1_new: if dict_wine1 == dict_wine2: break else: read_data_1.append(dict_wine2) wines = read_data_1 # [from_dict_to_namedtuple(dict_wine) for dict_wine in read_data_1] print("дляна результирующего списка без дублей", len(wines)) #сортировка # по цене в нисходящем порядке по убыванию (или по сорту в лексикографическом порядке) #мы преобразуем 2 числа (индекс и цену) в 1 число # w = len(wines) * 10 #вычисляем вес # (вес надо взять 
для цены побольше чтобы он дал наибольший вклад и перекрыл значение индекса) Index_table_price = [] for n, value in enumerate(wines, 0): try: if value['price']: price = int(value['price']) else: price = 0 except: print("TypeError: incorrect format of price!") index_price = n + price * w #вычисляем составной индекс Index_table_price.append(index_price) sorted_wines = [] #полученную индексную таблицу (аналог индекса в БД) сортируем #для этого используем встроенную функцию Index_table_price = sorted(Index_table_price, reverse=True) prev_price = Index_table_price[0] Index_table_title = [] for index_price in Index_table_price: # разлагаем обратно на индекс и цену price = index_price // w n = index_price % w #сделать группировку #получаем кучу маленьких списков if price != prev_price: #сформировали список с одинаковыми ценами #теперь сортируем по наименованию в лексикографическом порядке Index_table_title = sorted(Index_table_title) for index_title in Index_table_title: #разлагаем индекс на составные части lst = index_title.split('split') title = lst[0] j = int(lst[1]) sorted_wines.append(wines[j]) Index_table_title = [] # опять строим составной строчный индекс index_title = wines[n]['title'] + "split" + str(n) Index_table_title.append(index_title) prev_price = price out_file(sorted_wines, 'winedata_full.json', 'UTF-8') print("-------------------------------------------------------------------------------------------------") statistics = {} lst_varieties = ['Gewürztraminer', 'Riesling', 'Merlot', 'Madera', 'Tempranillo', 'Red Blend'] sort_wine = {} for variety in lst_varieties: selected_wines = [] for wine in sorted_wines: if variety == wine['variety']: selected_wines.append(wine) if not selected_wines: continue prices = [] score = [] dict_common_country = {} dict_common_region = {} for wine in selected_wines: if wine['country'] not in dict_common_country: dict_common_country[wine['country']] = 0 if wine['region_1'] not in dict_common_region: 
dict_common_region[wine['region_1']] = 0 dict_common_country[wine['country']] += 1 dict_common_region[wine['region_1']] += 1 prices.append(wine['price']) score.append(int(wine['points'])) average_price = round(sum(prices)/len(prices),1) max_price = max(prices) min_price = min(prices) min_price = min(prices) average_score = round(sum(score)/len(score),1) # * `most_common_region` где больше всего вин этого сорта производят ? # most_common_country #используем словарь most_common_country = get_most_common_object(dict_common_country) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! most_common_region = get_most_common_object(dict_common_region) variety = variety #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы sort_wine[variety] = {"average_price":0, "min_price":0, "max_price":0, "most_common_country":0, "most_common_region":0, "average_score":0} sort_wine[variety]["average_price"] = str(average_price) sort_wine[variety]["min_price"] = str(min_price) sort_wine[variety]["max_price"] = str(max_price) sort_wine[variety]["most_common_country"] = most_common_country sort_wine[variety]["most_common_region"] = most_common_region sort_wine[variety]["average_score"] = average_score statistics["wine"] = sort_wine # * `most_expensive_wine` в случае коллизий тут и далее делаем список. # * `cheapest_wine` # * `highest_score` # * `lowest_score` # * `most_expensive_coutry` в среднем самое дорогое вино среди стран # * `cheapest_coutry` в среднем самое дешевое вино среди стран # * `most_rated_country` # * `underrated_country` # * `most_active_commentator` #в случае коллизий тут и далее делаем список. 
most_expensive_wine = sorted_wines[0] cheapest_wine = sorted_wines[-1] lst_cheapest_wines = [] i = len(sorted_wines) - 1 while i >= 0: next_wine = sorted_wines[i] if cheapest_wine['price'] == next_wine['price']: lst_cheapest_wines.append(next_wine['title']) #.replace('â','ae').replace('ü','ue')) else: break i -= 1 statistics["most_expensive_wine"] = most_expensive_wine['title'] #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы statistics["cheapest_wine"] = lst_cheapest_wines # * `most_active_commentator` # * `most_expensive_coutry` dict_commentator = {} dict_country = {} for wine in sorted_wines: if wine['taster_name'] not in dict_commentator: dict_commentator[wine['taster_name']] = 0 if wine['country'] not in dict_country: dict_country[wine['country']] = [0,0,0] dict_commentator[wine['taster_name']] += 1 dict_country[wine['country']][0] += 1 dict_country[wine['country']][1] += wine['price'] dict_country[wine['country']][2] += int(wine['points']) # * `highest_score` lowest_score = int(sorted_wines[0]['points']) highest_score = 0 for wine in sorted_wines: if int(wine['points']) > highest_score: highest_score = int(wine['points']) if int(wine['points']) < lowest_score: lowest_score = int(wine['points']) statistics["highest_score"] = highest_score statistics["lowest_score"] = lowest_score # max_ = 0 min_ = most_expensive_wine['price'] max_rating = 0 min_rating = highest_score for country, lst in dict_country.items(): if lst[1]/lst[0] > max_: max_ = lst[1]/lst[0] most_expensive_coutry = country if lst[1]/lst[0] < min_: min_ = lst[1]/lst[0] cheapest_coutry = country # * `most_rated_country` # * `most_rated_country` if lst[2] > max_rating: max_rating = lst[2] most_rated_country = country if lst[2] < min_rating: min_rating = lst[2] underrated_country = country statistics['most_rated_country'] = most_rated_country statistics['underrated_country'] = underrated_country statistics['most_expensive_coutry'] = most_expensive_coutry 
statistics['cheapest_coutry'] = cheapest_coutry #------------------------------------------------------ max_ = max(dict_commentator.values()) most_active_commentator = [] for commentator,count_wines in dict_commentator.items(): if count_wines == max_ and commentator is not None: most_active_commentator.append(commentator) statistics["most_active_commentator"] = most_active_commentator #------------------------------------------------------- out_dict = {} out_dict["statistics"] = statistics print(out_dict) out_file(out_dict, 'stats.json', 'UTF-16') #--красивый MARKDOWN файл--------------- fout = open('selected_wines_statistics.txt', 'wt', encoding='utf-16') for variety, sort_wine in statistics['wine'].items(): print('variety:', variety, file=fout, sep='\t') print('average_price:',sort_wine['average_price'], file=fout, sep='\t') print('min_price:',sort_wine['min_price'], file=fout, sep='\t') print('max_price:',sort_wine['max_price'], file=fout, sep='\t') print('most_common_region:',sort_wine['most_common_region'], file=fout, sep='\t') print('most_common_country:', sort_wine['most_common_country'], file=fout, sep='\t') print('average_score:',sort_wine['average_score'], file=fout, sep='\t') print('------------------------------------------', file=fout) fout.close() print('файл selected_wines_statistics.txt создался успешно!') fout = open('common_statistics.txt', 'wt', encoding='utf-16') most_expensive_wine = statistics["most_expensive_wine"] print("most_expensive_wine:", most_expensive_wine, file=fout, sep='\t') print("cheapest_wine:", statistics["cheapest_wine"], file=fout, sep='\t') print("highest_score:", statistics["highest_score"], file=fout, sep='\t') print("lowest_score:", statistics["lowest_score"], file=fout, sep='\t') print("most_rated_country:", statistics["most_rated_country"], file=fout, sep='\t') print("underrated_country:", statistics["underrated_country"], file=fout, sep='\t') print("most_expensive_coutry:", statistics["most_expensive_coutry"], 
file=fout, sep='\t') print("cheapest_coutry:", statistics["cheapest_coutry"], file=fout, sep='\t') print("most_active_commentator:", statistics["most_active_commentator"], file=fout, sep='\t') fout.close() print('файл common_statistics.txt создался успешно!') main() print('time of execution: ', timeit.timeit("main()", number=3))
d = wine._asdict() dict_ = dict(d) return dict_ def out_file(out_dict, out_filename, encoding_table):
random_line_split
hw.py
import os import json import random from collections import namedtuple import timeit MODE = 'set_and_namedtuple' #'simple' FILE_NAME1 = "winedata_1.json" FILE_NAME2 = "winedata_2.json" #преобразуем словарь в NamedTuple def from_dict_to_namedtuple(dict_): return namedtuple('Wine',dict_.keys())(**dict_) def from_namedtuple_to_dict(wine): d = wine._asdict() dict_ = dict(d) return dict_ def out_file(out_dict, out_filename, encoding_table): try: with o
in dict_.items(): if value > max_: max_ = value most_common_object = key return most_common_object def main(): if os.path.exists(FILE_NAME1): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME1, 'r', encoding='UTF-8') as f: read_data_1 = json.load(f) else: print(f"{FILE_NAME1} File not found!") if os.path.exists(FILE_NAME2): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME2, 'r', encoding='UTF-8') as f: read_data_2 = json.load(f) else: print(f"{FILE_NAME2} File not found!") if MODE == 'set_and_namedtuple': #--убираем дубликаты моим методом dict_wines = read_data_1 + read_data_2 wines = [] wines = [from_dict_to_namedtuple(dict_wine) for dict_wine in dict_wines] #уничтожение дубликатов wines = list(set(wines)) wines = [from_namedtuple_to_dict(wine) for wine in wines] else: #--убираем дубликаты методом вложенных циклов # read_data_1_new = [] # for dict_wine2 in read_data_1: # for dict_wine1 in read_data_1: # if dict_wine1 == dict_wine2: # break # else: # read_data_1_new.append(dict_wine2) read_data_2_new = [] for dict_wine2 in read_data_2: for dict_wine1 in read_data_2: if dict_wine1 == dict_wine2: break else: read_data_2_new.append(dict_wine2) read_data_1_new = read_data_1 for dict_wine2 in read_data_2_new: for dict_wine1 in read_data_1_new: if dict_wine1 == dict_wine2: break else: read_data_1.append(dict_wine2) wines = read_data_1 # [from_dict_to_namedtuple(dict_wine) for dict_wine in read_data_1] print("дляна результирующего списка без дублей", len(wines)) #сортировка # по цене в нисходящем порядке по убыванию (или по сорту в лексикографическом порядке) #мы преобразуем 2 числа (индекс и цену) в 1 число # w = len(wines) * 10 #вычисляем вес # (вес надо взять для цены побольше чтобы он дал наибольший вклад и перекрыл значение индекса) Index_table_price = [] for n, value in enumerate(wines, 0): try: if value['price']: price = int(value['price']) else: price = 0 except: print("TypeError: incorrect format of price!") index_price 
= n + price * w #вычисляем составной индекс Index_table_price.append(index_price) sorted_wines = [] #полученную индексную таблицу (аналог индекса в БД) сортируем #для этого используем встроенную функцию Index_table_price = sorted(Index_table_price, reverse=True) prev_price = Index_table_price[0] Index_table_title = [] for index_price in Index_table_price: # разлагаем обратно на индекс и цену price = index_price // w n = index_price % w #сделать группировку #получаем кучу маленьких списков if price != prev_price: #сформировали список с одинаковыми ценами #теперь сортируем по наименованию в лексикографическом порядке Index_table_title = sorted(Index_table_title) for index_title in Index_table_title: #разлагаем индекс на составные части lst = index_title.split('split') title = lst[0] j = int(lst[1]) sorted_wines.append(wines[j]) Index_table_title = [] # опять строим составной строчный индекс index_title = wines[n]['title'] + "split" + str(n) Index_table_title.append(index_title) prev_price = price out_file(sorted_wines, 'winedata_full.json', 'UTF-8') print("-------------------------------------------------------------------------------------------------") statistics = {} lst_varieties = ['Gewürztraminer', 'Riesling', 'Merlot', 'Madera', 'Tempranillo', 'Red Blend'] sort_wine = {} for variety in lst_varieties: selected_wines = [] for wine in sorted_wines: if variety == wine['variety']: selected_wines.append(wine) if not selected_wines: continue prices = [] score = [] dict_common_country = {} dict_common_region = {} for wine in selected_wines: if wine['country'] not in dict_common_country: dict_common_country[wine['country']] = 0 if wine['region_1'] not in dict_common_region: dict_common_region[wine['region_1']] = 0 dict_common_country[wine['country']] += 1 dict_common_region[wine['region_1']] += 1 prices.append(wine['price']) score.append(int(wine['points'])) average_price = round(sum(prices)/len(prices),1) max_price = max(prices) min_price = min(prices) min_price = 
min(prices) average_score = round(sum(score)/len(score),1) # * `most_common_region` где больше всего вин этого сорта производят ? # most_common_country #используем словарь most_common_country = get_most_common_object(dict_common_country) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! most_common_region = get_most_common_object(dict_common_region) variety = variety #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы sort_wine[variety] = {"average_price":0, "min_price":0, "max_price":0, "most_common_country":0, "most_common_region":0, "average_score":0} sort_wine[variety]["average_price"] = str(average_price) sort_wine[variety]["min_price"] = str(min_price) sort_wine[variety]["max_price"] = str(max_price) sort_wine[variety]["most_common_country"] = most_common_country sort_wine[variety]["most_common_region"] = most_common_region sort_wine[variety]["average_score"] = average_score statistics["wine"] = sort_wine # * `most_expensive_wine` в случае коллизий тут и далее делаем список. # * `cheapest_wine` # * `highest_score` # * `lowest_score` # * `most_expensive_coutry` в среднем самое дорогое вино среди стран # * `cheapest_coutry` в среднем самое дешевое вино среди стран # * `most_rated_country` # * `underrated_country` # * `most_active_commentator` #в случае коллизий тут и далее делаем список. 
most_expensive_wine = sorted_wines[0] cheapest_wine = sorted_wines[-1] lst_cheapest_wines = [] i = len(sorted_wines) - 1 while i >= 0: next_wine = sorted_wines[i] if cheapest_wine['price'] == next_wine['price']: lst_cheapest_wines.append(next_wine['title']) #.replace('â','ae').replace('ü','ue')) else: break i -= 1 statistics["most_expensive_wine"] = most_expensive_wine['title'] #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы statistics["cheapest_wine"] = lst_cheapest_wines # * `most_active_commentator` # * `most_expensive_coutry` dict_commentator = {} dict_country = {} for wine in sorted_wines: if wine['taster_name'] not in dict_commentator: dict_commentator[wine['taster_name']] = 0 if wine['country'] not in dict_country: dict_country[wine['country']] = [0,0,0] dict_commentator[wine['taster_name']] += 1 dict_country[wine['country']][0] += 1 dict_country[wine['country']][1] += wine['price'] dict_country[wine['country']][2] += int(wine['points']) # * `highest_score` lowest_score = int(sorted_wines[0]['points']) highest_score = 0 for wine in sorted_wines: if int(wine['points']) > highest_score: highest_score = int(wine['points']) if int(wine['points']) < lowest_score: lowest_score = int(wine['points']) statistics["highest_score"] = highest_score statistics["lowest_score"] = lowest_score # max_ = 0 min_ = most_expensive_wine['price'] max_rating = 0 min_rating = highest_score for country, lst in dict_country.items(): if lst[1]/lst[0] > max_: max_ = lst[1]/lst[0] most_expensive_coutry = country if lst[1]/lst[0] < min_: min_ = lst[1]/lst[0] cheapest_coutry = country # * `most_rated_country` # * `most_rated_country` if lst[2] > max_rating: max_rating = lst[2] most_rated_country = country if lst[2] < min_rating: min_rating = lst[2] underrated_country = country statistics['most_rated_country'] = most_rated_country statistics['underrated_country'] = underrated_country statistics['most_expensive_coutry'] = most_expensive_coutry 
statistics['cheapest_coutry'] = cheapest_coutry #------------------------------------------------------ max_ = max(dict_commentator.values()) most_active_commentator = [] for commentator,count_wines in dict_commentator.items(): if count_wines == max_ and commentator is not None: most_active_commentator.append(commentator) statistics["most_active_commentator"] = most_active_commentator #------------------------------------------------------- out_dict = {} out_dict["statistics"] = statistics print(out_dict) out_file(out_dict, 'stats.json', 'UTF-16') #--красивый MARKDOWN файл--------------- fout = open('selected_wines_statistics.txt', 'wt', encoding='utf-16') for variety, sort_wine in statistics['wine'].items(): print('variety:', variety, file=fout, sep='\t') print('average_price:',sort_wine['average_price'], file=fout, sep='\t') print('min_price:',sort_wine['min_price'], file=fout, sep='\t') print('max_price:',sort_wine['max_price'], file=fout, sep='\t') print('most_common_region:',sort_wine['most_common_region'], file=fout, sep='\t') print('most_common_country:', sort_wine['most_common_country'], file=fout, sep='\t') print('average_score:',sort_wine['average_score'], file=fout, sep='\t') print('------------------------------------------', file=fout) fout.close() print('файл selected_wines_statistics.txt создался успешно!') fout = open('common_statistics.txt', 'wt', encoding='utf-16') most_expensive_wine = statistics["most_expensive_wine"] print("most_expensive_wine:", most_expensive_wine, file=fout, sep='\t') print("cheapest_wine:", statistics["cheapest_wine"], file=fout, sep='\t') print("highest_score:", statistics["highest_score"], file=fout, sep='\t') print("lowest_score:", statistics["lowest_score"], file=fout, sep='\t') print("most_rated_country:", statistics["most_rated_country"], file=fout, sep='\t') print("underrated_country:", statistics["underrated_country"], file=fout, sep='\t') print("most_expensive_coutry:", statistics["most_expensive_coutry"], 
file=fout, sep='\t') print("cheapest_coutry:", statistics["cheapest_coutry"], file=fout, sep='\t') print("most_active_commentator:", statistics["most_active_commentator"], file=fout, sep='\t') fout.close() print('файл common_statistics.txt создался успешно!') main() print('time of execution: ', timeit.timeit("main()", number=3))
pen(out_filename, 'w', encoding=encoding_table) as f: json.dump(out_dict, f, ensure_ascii=False) print(f"файл {out_filename} создался успешно!") except: print("Ошибка при записи выходного файла JSON") def get_most_common_object(dict_): max_ = 0 for key,value
identifier_body
hw.py
import os import json import random from collections import namedtuple import timeit MODE = 'set_and_namedtuple' #'simple' FILE_NAME1 = "winedata_1.json" FILE_NAME2 = "winedata_2.json" #преобразуем словарь в NamedTuple def from_dict_to_namedtuple(dict_): return namedtuple('Wine',dict_.keys())(**dict_) def from_namedtuple_to_dict(wine): d = wine._asdict() dict_ = dict(d) return dict_ def out_file(out_dict, out_filename, encoding_table): try: with open(out_filename, 'w', encoding=encoding_table) as f: json.dump(out_dict, f, ensure_ascii=False) print(f"файл {out_filename} создался успешно!") except: print("Ошибка при записи выходного файла JSON") def get_most_common_object(dict_): max_ = 0 for key,value in dict_.items(): if value > max_: max_ = value most_common_object = key return most_common_object def main(): if os.path.exists(FILE_NAME1): # Читаем JSON из
ла и преобразуем к типу Python with open(FILE_NAME1, 'r', encoding='UTF-8') as f: read_data_1 = json.load(f) else: print(f"{FILE_NAME1} File not found!") if os.path.exists(FILE_NAME2): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME2, 'r', encoding='UTF-8') as f: read_data_2 = json.load(f) else: print(f"{FILE_NAME2} File not found!") if MODE == 'set_and_namedtuple': #--убираем дубликаты моим методом dict_wines = read_data_1 + read_data_2 wines = [] wines = [from_dict_to_namedtuple(dict_wine) for dict_wine in dict_wines] #уничтожение дубликатов wines = list(set(wines)) wines = [from_namedtuple_to_dict(wine) for wine in wines] else: #--убираем дубликаты методом вложенных циклов # read_data_1_new = [] # for dict_wine2 in read_data_1: # for dict_wine1 in read_data_1: # if dict_wine1 == dict_wine2: # break # else: # read_data_1_new.append(dict_wine2) read_data_2_new = [] for dict_wine2 in read_data_2: for dict_wine1 in read_data_2: if dict_wine1 == dict_wine2: break else: read_data_2_new.append(dict_wine2) read_data_1_new = read_data_1 for dict_wine2 in read_data_2_new: for dict_wine1 in read_data_1_new: if dict_wine1 == dict_wine2: break else: read_data_1.append(dict_wine2) wines = read_data_1 # [from_dict_to_namedtuple(dict_wine) for dict_wine in read_data_1] print("дляна результирующего списка без дублей", len(wines)) #сортировка # по цене в нисходящем порядке по убыванию (или по сорту в лексикографическом порядке) #мы преобразуем 2 числа (индекс и цену) в 1 число # w = len(wines) * 10 #вычисляем вес # (вес надо взять для цены побольше чтобы он дал наибольший вклад и перекрыл значение индекса) Index_table_price = [] for n, value in enumerate(wines, 0): try: if value['price']: price = int(value['price']) else: price = 0 except: print("TypeError: incorrect format of price!") index_price = n + price * w #вычисляем составной индекс Index_table_price.append(index_price) sorted_wines = [] #полученную индексную таблицу (аналог индекса в БД) сортируем 
#для этого используем встроенную функцию Index_table_price = sorted(Index_table_price, reverse=True) prev_price = Index_table_price[0] Index_table_title = [] for index_price in Index_table_price: # разлагаем обратно на индекс и цену price = index_price // w n = index_price % w #сделать группировку #получаем кучу маленьких списков if price != prev_price: #сформировали список с одинаковыми ценами #теперь сортируем по наименованию в лексикографическом порядке Index_table_title = sorted(Index_table_title) for index_title in Index_table_title: #разлагаем индекс на составные части lst = index_title.split('split') title = lst[0] j = int(lst[1]) sorted_wines.append(wines[j]) Index_table_title = [] # опять строим составной строчный индекс index_title = wines[n]['title'] + "split" + str(n) Index_table_title.append(index_title) prev_price = price out_file(sorted_wines, 'winedata_full.json', 'UTF-8') print("-------------------------------------------------------------------------------------------------") statistics = {} lst_varieties = ['Gewürztraminer', 'Riesling', 'Merlot', 'Madera', 'Tempranillo', 'Red Blend'] sort_wine = {} for variety in lst_varieties: selected_wines = [] for wine in sorted_wines: if variety == wine['variety']: selected_wines.append(wine) if not selected_wines: continue prices = [] score = [] dict_common_country = {} dict_common_region = {} for wine in selected_wines: if wine['country'] not in dict_common_country: dict_common_country[wine['country']] = 0 if wine['region_1'] not in dict_common_region: dict_common_region[wine['region_1']] = 0 dict_common_country[wine['country']] += 1 dict_common_region[wine['region_1']] += 1 prices.append(wine['price']) score.append(int(wine['points'])) average_price = round(sum(prices)/len(prices),1) max_price = max(prices) min_price = min(prices) min_price = min(prices) average_score = round(sum(score)/len(score),1) # * `most_common_region` где больше всего вин этого сорта производят ? 
# most_common_country #используем словарь most_common_country = get_most_common_object(dict_common_country) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! most_common_region = get_most_common_object(dict_common_region) variety = variety #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы sort_wine[variety] = {"average_price":0, "min_price":0, "max_price":0, "most_common_country":0, "most_common_region":0, "average_score":0} sort_wine[variety]["average_price"] = str(average_price) sort_wine[variety]["min_price"] = str(min_price) sort_wine[variety]["max_price"] = str(max_price) sort_wine[variety]["most_common_country"] = most_common_country sort_wine[variety]["most_common_region"] = most_common_region sort_wine[variety]["average_score"] = average_score statistics["wine"] = sort_wine # * `most_expensive_wine` в случае коллизий тут и далее делаем список. # * `cheapest_wine` # * `highest_score` # * `lowest_score` # * `most_expensive_coutry` в среднем самое дорогое вино среди стран # * `cheapest_coutry` в среднем самое дешевое вино среди стран # * `most_rated_country` # * `underrated_country` # * `most_active_commentator` #в случае коллизий тут и далее делаем список. 
most_expensive_wine = sorted_wines[0] cheapest_wine = sorted_wines[-1] lst_cheapest_wines = [] i = len(sorted_wines) - 1 while i >= 0: next_wine = sorted_wines[i] if cheapest_wine['price'] == next_wine['price']: lst_cheapest_wines.append(next_wine['title']) #.replace('â','ae').replace('ü','ue')) else: break i -= 1 statistics["most_expensive_wine"] = most_expensive_wine['title'] #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы statistics["cheapest_wine"] = lst_cheapest_wines # * `most_active_commentator` # * `most_expensive_coutry` dict_commentator = {} dict_country = {} for wine in sorted_wines: if wine['taster_name'] not in dict_commentator: dict_commentator[wine['taster_name']] = 0 if wine['country'] not in dict_country: dict_country[wine['country']] = [0,0,0] dict_commentator[wine['taster_name']] += 1 dict_country[wine['country']][0] += 1 dict_country[wine['country']][1] += wine['price'] dict_country[wine['country']][2] += int(wine['points']) # * `highest_score` lowest_score = int(sorted_wines[0]['points']) highest_score = 0 for wine in sorted_wines: if int(wine['points']) > highest_score: highest_score = int(wine['points']) if int(wine['points']) < lowest_score: lowest_score = int(wine['points']) statistics["highest_score"] = highest_score statistics["lowest_score"] = lowest_score # max_ = 0 min_ = most_expensive_wine['price'] max_rating = 0 min_rating = highest_score for country, lst in dict_country.items(): if lst[1]/lst[0] > max_: max_ = lst[1]/lst[0] most_expensive_coutry = country if lst[1]/lst[0] < min_: min_ = lst[1]/lst[0] cheapest_coutry = country # * `most_rated_country` # * `most_rated_country` if lst[2] > max_rating: max_rating = lst[2] most_rated_country = country if lst[2] < min_rating: min_rating = lst[2] underrated_country = country statistics['most_rated_country'] = most_rated_country statistics['underrated_country'] = underrated_country statistics['most_expensive_coutry'] = most_expensive_coutry 
statistics['cheapest_coutry'] = cheapest_coutry #------------------------------------------------------ max_ = max(dict_commentator.values()) most_active_commentator = [] for commentator,count_wines in dict_commentator.items(): if count_wines == max_ and commentator is not None: most_active_commentator.append(commentator) statistics["most_active_commentator"] = most_active_commentator #------------------------------------------------------- out_dict = {} out_dict["statistics"] = statistics print(out_dict) out_file(out_dict, 'stats.json', 'UTF-16') #--красивый MARKDOWN файл--------------- fout = open('selected_wines_statistics.txt', 'wt', encoding='utf-16') for variety, sort_wine in statistics['wine'].items(): print('variety:', variety, file=fout, sep='\t') print('average_price:',sort_wine['average_price'], file=fout, sep='\t') print('min_price:',sort_wine['min_price'], file=fout, sep='\t') print('max_price:',sort_wine['max_price'], file=fout, sep='\t') print('most_common_region:',sort_wine['most_common_region'], file=fout, sep='\t') print('most_common_country:', sort_wine['most_common_country'], file=fout, sep='\t') print('average_score:',sort_wine['average_score'], file=fout, sep='\t') print('------------------------------------------', file=fout) fout.close() print('файл selected_wines_statistics.txt создался успешно!') fout = open('common_statistics.txt', 'wt', encoding='utf-16') most_expensive_wine = statistics["most_expensive_wine"] print("most_expensive_wine:", most_expensive_wine, file=fout, sep='\t') print("cheapest_wine:", statistics["cheapest_wine"], file=fout, sep='\t') print("highest_score:", statistics["highest_score"], file=fout, sep='\t') print("lowest_score:", statistics["lowest_score"], file=fout, sep='\t') print("most_rated_country:", statistics["most_rated_country"], file=fout, sep='\t') print("underrated_country:", statistics["underrated_country"], file=fout, sep='\t') print("most_expensive_coutry:", statistics["most_expensive_coutry"], 
file=fout, sep='\t') print("cheapest_coutry:", statistics["cheapest_coutry"], file=fout, sep='\t') print("most_active_commentator:", statistics["most_active_commentator"], file=fout, sep='\t') fout.close() print('файл common_statistics.txt создался успешно!') main() print('time of execution: ', timeit.timeit("main()", number=3))
фай
identifier_name
hw.py
import os import json import random from collections import namedtuple import timeit MODE = 'set_and_namedtuple' #'simple' FILE_NAME1 = "winedata_1.json" FILE_NAME2 = "winedata_2.json" #преобразуем словарь в NamedTuple def from_dict_to_namedtuple(dict_): return namedtuple('Wine',dict_.keys())(**dict_) def from_namedtuple_to_dict(wine): d = wine._asdict() dict_ = dict(d) return dict_ def out_file(out_dict, out_filename, encoding_table): try: with open(out_filename, 'w', encoding=encoding_table) as f: json.dump(out_dict, f, ensure_ascii=False) print(f"файл {out_filename} создался успешно!") except: print("Ошибка при записи выходного файла JSON") def get_most_common_object(dict_): max_ = 0 for key,value in dict_.items(): if value > max_: max_ = value most_common_object = key return most_common_object def main(): if os.path.exists(FILE_NAME1): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME1, 'r', encoding='UTF-8') as f: read_data_1 = json.load(f) else: print(f"{FILE_NAME1} File not found!") if os.path.exists(FILE_NAME2): # Читаем JSON из файла и преобразуем к типу Python with open(FILE_NAME2, 'r', encoding='UTF-8') as f: read_data_2 = json.load(f) else: print(f"{FILE_NAME2} File not found!") if MODE == 'set_and_namedtuple': #--убираем дубликаты моим методом dict_wines = read_data_1 + read_data_2 wines = [] wines = [from_dict_to_namedtuple(dict_wine) for dict_wine in dict_wines] #уничтожение дубликатов wines = list(set(wines)) wines = [from_namedtuple_to_dict(wine) for wine in wines] else: #--убираем дубликаты методом вложенных циклов # read_data_1_new = [] # for dict_wine2 in read_data_1: # for dict_wine1 in read_data_1: # if dict_wine1 == dict_wine2: # break # else: # read_data_1_new.append(dict_wine2) read_data_2_new = [] for dict_wine2 in read_data_2: for dict_wine1 in read_data_2: if dict_wine1 == dict_wine2: break else: read_data_2_new.append(dict_wine2) read_data_1_new = read_data_1 for dict_wine2 in read_data_2_new: for dict_wine1 in 
read_data_1_new: if dict_wine1 == dict_wine2: break else: read_data_1.append(dict_wine2) wines = read_data_1 # [from_dict_to_namedtuple(dict_wine) for dict_wine in read_data_1] print("дляна результирующего списка без дублей", len(wines)) #сортировка # по цене в нисходящем порядке по убыванию (или по сорту в лексикографическом порядке) #мы преобразуем 2 числа (индекс и цену) в 1 число # w = len(wines) * 10 #вычисляем вес # (вес надо взять для цены побольше чтобы он дал наибольший вклад и перекрыл значение индекса) Index_table_price = [] for n, value in enumerate(wines, 0): try: if value['price']: price = int(value['price']) else: price = 0 except: print("TypeError: incorrect format of price!") index_price = n + price * w #вычисляем составной индекс Index_table_price.append(index_price) sorted_wines = [] #полученную индексную таблицу (аналог индекса в БД) сортируем #для этого используем встроенную функцию Index_table_price = sorted(Index_table_price, reverse=True) prev_price = Index_table_price[0] Index_table_title = [] for index_price in Index_table_price: # разлагаем обратно на индекс и цену price = index_price // w n = index_price % w #сделать группировку #получаем кучу маленьких списков if price != prev_price: #сформировали список с одинаковыми ценами #теперь сортируем по наименованию в лексикографическом порядке Index_table_title = sorted(Index_table_title) for index_title in Index_table_title: #разлагаем индекс на составные части lst = index_title.split('split') title = lst[0] j = int(lst[1]) sorted_wines.append(wines[j]) Index_table_title = [] # опять строим составной строчный индекс index_title = wines[n]['title'] + "split" + str(n) Index_table_title.append(index_title) prev_price = price out_file(sorted_wines, 'winedata_full.json', 'UTF-8') print("-------------------------------------------------------------------------------------------------") statistics = {} lst_varieties = ['Gewürztraminer', 'Riesling', 'Merlot', 'Madera', 'Tempranillo', 'Red Blend'] 
sort_wine = {} for variety in lst_varieties: selected_wines = [] for wine in sorted_wines: if variety == wine['variety']: selected_wines.append(wine) if not selected_wines: continue prices = [] score = [] dict_common_country = {} dict_common_region = {} for wine in selected_wines: if wine['country'] not in dict_common_country: dict_common_country[wine['country']] = 0 if wine['region_1'] not in dict_common_region: dict_common_region[wine['region_1']] = 0 dict_common_country[wine['country']] += 1 dict_common_region[wine['region_1']] += 1 prices.append(wine['price']) score.append(int(wine['points'])) average_price = round(sum(prices)/len(prices),1) max_price = max(prices) min_price = min(prices) min_price = min(prices) average_score = round(sum(score)/len(score),1) # * `most_common_region` где больше всего вин этого сорта производят ? # most_common_country #используем словарь most_common_country = get_most_common_object(dict_common_country) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! most_common_region = get_most_common_object(dict_common_region) variety = var
e') #вместо немецких вставить общепринятые символы sort_wine[variety] = {"average_price":0, "min_price":0, "max_price":0, "most_common_country":0, "most_common_region":0, "average_score":0} sort_wine[variety]["average_price"] = str(average_price) sort_wine[variety]["min_price"] = str(min_price) sort_wine[variety]["max_price"] = str(max_price) sort_wine[variety]["most_common_country"] = most_common_country sort_wine[variety]["most_common_region"] = most_common_region sort_wine[variety]["average_score"] = average_score statistics["wine"] = sort_wine # * `most_expensive_wine` в случае коллизий тут и далее делаем список. # * `cheapest_wine` # * `highest_score` # * `lowest_score` # * `most_expensive_coutry` в среднем самое дорогое вино среди стран # * `cheapest_coutry` в среднем самое дешевое вино среди стран # * `most_rated_country` # * `underrated_country` # * `most_active_commentator` #в случае коллизий тут и далее делаем список. most_expensive_wine = sorted_wines[0] cheapest_wine = sorted_wines[-1] lst_cheapest_wines = [] i = len(sorted_wines) - 1 while i >= 0: next_wine = sorted_wines[i] if cheapest_wine['price'] == next_wine['price']: lst_cheapest_wines.append(next_wine['title']) #.replace('â','ae').replace('ü','ue')) else: break i -= 1 statistics["most_expensive_wine"] = most_expensive_wine['title'] #.replace('â','ae').replace('ü','ue') #вместо немецких вставить общепринятые символы statistics["cheapest_wine"] = lst_cheapest_wines # * `most_active_commentator` # * `most_expensive_coutry` dict_commentator = {} dict_country = {} for wine in sorted_wines: if wine['taster_name'] not in dict_commentator: dict_commentator[wine['taster_name']] = 0 if wine['country'] not in dict_country: dict_country[wine['country']] = [0,0,0] dict_commentator[wine['taster_name']] += 1 dict_country[wine['country']][0] += 1 dict_country[wine['country']][1] += wine['price'] dict_country[wine['country']][2] += int(wine['points']) # * `highest_score` lowest_score = 
int(sorted_wines[0]['points']) highest_score = 0 for wine in sorted_wines: if int(wine['points']) > highest_score: highest_score = int(wine['points']) if int(wine['points']) < lowest_score: lowest_score = int(wine['points']) statistics["highest_score"] = highest_score statistics["lowest_score"] = lowest_score # max_ = 0 min_ = most_expensive_wine['price'] max_rating = 0 min_rating = highest_score for country, lst in dict_country.items(): if lst[1]/lst[0] > max_: max_ = lst[1]/lst[0] most_expensive_coutry = country if lst[1]/lst[0] < min_: min_ = lst[1]/lst[0] cheapest_coutry = country # * `most_rated_country` # * `most_rated_country` if lst[2] > max_rating: max_rating = lst[2] most_rated_country = country if lst[2] < min_rating: min_rating = lst[2] underrated_country = country statistics['most_rated_country'] = most_rated_country statistics['underrated_country'] = underrated_country statistics['most_expensive_coutry'] = most_expensive_coutry statistics['cheapest_coutry'] = cheapest_coutry #------------------------------------------------------ max_ = max(dict_commentator.values()) most_active_commentator = [] for commentator,count_wines in dict_commentator.items(): if count_wines == max_ and commentator is not None: most_active_commentator.append(commentator) statistics["most_active_commentator"] = most_active_commentator #------------------------------------------------------- out_dict = {} out_dict["statistics"] = statistics print(out_dict) out_file(out_dict, 'stats.json', 'UTF-16') #--красивый MARKDOWN файл--------------- fout = open('selected_wines_statistics.txt', 'wt', encoding='utf-16') for variety, sort_wine in statistics['wine'].items(): print('variety:', variety, file=fout, sep='\t') print('average_price:',sort_wine['average_price'], file=fout, sep='\t') print('min_price:',sort_wine['min_price'], file=fout, sep='\t') print('max_price:',sort_wine['max_price'], file=fout, sep='\t') print('most_common_region:',sort_wine['most_common_region'], file=fout, 
sep='\t') print('most_common_country:', sort_wine['most_common_country'], file=fout, sep='\t') print('average_score:',sort_wine['average_score'], file=fout, sep='\t') print('------------------------------------------', file=fout) fout.close() print('файл selected_wines_statistics.txt создался успешно!') fout = open('common_statistics.txt', 'wt', encoding='utf-16') most_expensive_wine = statistics["most_expensive_wine"] print("most_expensive_wine:", most_expensive_wine, file=fout, sep='\t') print("cheapest_wine:", statistics["cheapest_wine"], file=fout, sep='\t') print("highest_score:", statistics["highest_score"], file=fout, sep='\t') print("lowest_score:", statistics["lowest_score"], file=fout, sep='\t') print("most_rated_country:", statistics["most_rated_country"], file=fout, sep='\t') print("underrated_country:", statistics["underrated_country"], file=fout, sep='\t') print("most_expensive_coutry:", statistics["most_expensive_coutry"], file=fout, sep='\t') print("cheapest_coutry:", statistics["cheapest_coutry"], file=fout, sep='\t') print("most_active_commentator:", statistics["most_active_commentator"], file=fout, sep='\t') fout.close() print('файл common_statistics.txt создался успешно!') main() print('time of execution: ', timeit.timeit("main()", number=3))
iety #.replace('â','ae').replace('ü','u
conditional_block
brain-gan-parameter-search.py
""" Authors: Dan Mohler, Prad Ejner, Jordan Winkler Last updated: Sun Dec 15 15:22:17 EST 2019 Description: A generative adversarial network for generating images of brains """ # Deep Learning libraries for model from keras.models import Sequential, Model, load_model from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization, Reshape, Dense, Input, LeakyReLU, Dropout, Flatten, ZeroPadding2D from keras.optimizers import Adam # Operating system and computational libraries for OS interfacing import glob,tqdm from PIL import Image import numpy as np import os import argparse from ast import literal_eval # Update for image save import imageio imsave = imageio.imwrite # To stop using up all of the RAM when running the models import tensorflow as tf import keras config = tf.ConfigProto() config.gpu_options.allow_growth = True keras.backend.tensorflow_backend.set_session(tf.Session(config=config)) # DCGAN : Deep Convolutional Generative Adversarial Network # # Main paper describing the process # Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks # https://arxiv.org/pdf/1511.06434.pdf class DCGAN: def __init__(self, discriminator_path, generator_path, output_directory, img_size, dropout, bn_momentum, adam_lr, adam_beta): self.img_size = img_size self.upsample_layers = 5 self.starting_filters = 64 self.kernel_size = 3 self.channels = 1 self.discriminator_path = discriminator_path self.generator_path = generator_path self.dropout = dropout self.bn_momentum = bn_momentum self.adam_lr = adam_lr self.adam_beta = adam_beta labels = f"/dropout={self.dropout} bn_momentum={self.bn_momentum} adam_lr={self.adam_lr} adam_beta={self.adam_beta}" self.output_directory = output_directory + labels def build_generator_model(self, noise_shape): model = Sequential() # This block of code can be a little daunting, but essentially it automatically calculates the required starting # array size that will be correctly upscaled to our 
desired image size. # # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following: # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model] # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64. model.add( Dense(self.starting_filters * (self.img_size[0] // (2 ** self.upsample_layers)) * (self.img_size[1] // (2 ** self.upsample_layers)), activation="relu", input_shape=noise_shape)) model.add(Reshape(((self.img_size[0] // (2 ** self.upsample_layers)), (self.img_size[1] // (2 ** self.upsample_layers)), self.starting_filters))) model.add(BatchNormalization(momentum=self.bn_momentum)) # Repeats the pattern of: stretching out data, mutating/shrinking data, and normalization model.add(UpSampling2D()) # 6x8 -> 12x16 model.add(Conv2D(1024, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 12x16 -> 24x32 model.add(Conv2D(512, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 24x32 -> 48x64 model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 48x64 -> 96x128 model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 96x128 -> 192x256 model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(32, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) 
model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same")) model.add(Activation("tanh")) model.summary() return model def build_discriminator_model(self, img_shape): model = Sequential() model.add(Conv2D(32, kernel_size=self.kernel_size, strides=2, input_shape=img_shape, padding="same")) # 192x256 -> 96x128 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Conv2D(64, kernel_size=self.kernel_size, strides=2, padding="same")) # 96x128 -> 48x64 model.add(ZeroPadding2D(padding=((0, 1), (0, 1)))) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same")) # 48x64 -> 24x32 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(256, kernel_size=self.kernel_size, strides=1, padding="same")) # 24x32 -> 12x16 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Conv2D(512, kernel_size=self.kernel_size, strides=1, padding="same")) # 12x16 -> 6x8 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) model.summary() return model def build_generator(self): noise_shape = (100,) model = self.build_generator_model(noise_shape) noise = Input(shape=noise_shape) img = model(noise) return Model(noise, img) def build_discriminator(self): img_shape = (self.img_size[0], self.img_size[1], self.channels) model = self.build_discriminator_model(img_shape) img = Input(shape=img_shape) validity = model(img) return Model(img, validity) def build_gan(self): optimizer = Adam(self.adam_lr, self.adam_beta) # See if the specified model paths exist, if they don't then we start training new models if os.path.exists(self.discriminator_path) and os.path.exists(self.generator_path): self.discriminator = 
load_model(self.discriminator_path) self.generator = load_model(self.generator_path) print("Loaded models...") else: self.discriminator = self.build_discriminator() self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) self.generator = self.build_generator() self.generator.compile(loss='binary_crossentropy', optimizer=optimizer) # These next few lines setup the training for the GAN model z = Input(shape=(100,)) img = self.generator(z) self.discriminator.trainable = False valid = self.discriminator(img) self.combined = Model(z, valid) self.combined.compile(loss='binary_crossentropy', optimizer=optimizer) def load_imgs(self, image_path): X_train = [] for i in glob.glob(image_path): img = Image.open(i) img = np.asarray(img) img = img.reshape(self.img_size[0], self.img_size[1], self.channels) X_train.append(img) return np.asarray(X_train) def pix_array_convert(self, img): img_array = np.asarray(img) if np.max(img_array) > 1 or np.min(img_array) < 0: print("Picture array outside expected value") exit() else:
return(img_array) def train(self, epochs, image_path, batch_size=32, save_interval=50): self.build_gan() X_train = self.load_imgs(image_path) print("Training Data Shape: ", X_train.shape) # Rescale images from -1 to 1 X_train = (X_train.astype(np.float32) - 127.5) / 127.5 half_batch = batch_size // 2 # Training Loop prog_bar = tqdm.tqdm(range(epochs)) prog_bar.ncols = 20 for epoch in prog_bar: # Train Generator noise = np.random.normal(0, 1, (batch_size, 100)) g_loss = self.combined.train_on_batch(noise, np.ones((batch_size, 1))) # Train Discriminator idx = np.random.randint(0, X_train.shape[0], half_batch) imgs = X_train[idx] #imgs = np.expand_dims(imgs, axis=4) # Added by Jordan to get tif dimensions to fit # Sample noise and generate a half batch of new images noise = np.random.normal(0, 1, (half_batch, 100)) gen_imgs = self.generator.predict(noise) # Train the discriminator (real classified as ones and generated as zeros) d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1))) d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1))) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # Print progress prog_bar.set_description(("D loss: " + format(d_loss[0], "^-06.3f") + " | D Accuracy: " + format(d_loss[1], "^-06.3f") +" | G loss: " + format(g_loss, "^-06.3f"))) #prog_bar.set_description(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") #print(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") self.save_imgs(save_interval) save_path = self.output_directory + "/models" if not os.path.exists(save_path): os.makedirs(save_path) self.discriminator.save(save_path + f"/{epoch} discrim.h5") self.generator.save(save_path + f"/{epoch} generat.h5") def gene_imgs(self, count): " Generate images from the currently loaded model" noise = np.random.normal(0, 1, (count, 100)) return self.generator.predict(noise) def save_imgs(self, epoch): " Generates n images 
from the model, and saves them" n = 1019 imgs = self.gene_imgs(n) imgs = 0.5 * imgs + 0.5 for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{epoch}_{i}.png", self.pix_array_convert(img_array)) def generate_imgs(self, count, threshold, modifier): """ Generates (count) images from the model ensuring the discriminator scores them between the threshold values and saves them """ self.build_gan() imgs = [] for i in range(count): score = [0] while not(threshold[0] < score[0] < threshold[1]): img = self.gene_imgs(1) score = self.discriminator.predict(img) print("Image found: ", score[0]) imgs.append(img) imgs = np.asarray(imgs).squeeze() imgs = 0.5 * imgs + 0.5 print(imgs.shape) for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated_{threshold[0]}_{threshold[1]}" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{modifier}_{i}.png", self.pix_array_convert(img_array)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--load_generator', help='Path to existing generator weights file', default="data/models/generat.h5") parser.add_argument('--load_discriminator', help='Path to existing discriminator weights file', default="data/models/discrim.h5") parser.add_argument('--data', help='Path to directory of images of correct dimensions, using *.[filetype] (e.g. *.png) to reference images', default="128_128/*.tif") parser.add_argument('--sample', help='If given, will generate that many samples from existing model instead of training', default=-1) parser.add_argument('--sample_thresholds', help='The values between which a generated image must score from the discriminator', default="(0.0, 0.1)") parser.add_argument('--batch_size', help='Number of images to train on at once', default=24) parser.add_argument('--image_size', help='Size of images as tuple (height,width). 
Height and width must both be divisible by (2^5)', default="(128, 128)") parser.add_argument('--epochs', help='Number of epochs to train for', default=50000) parser.add_argument('--save_interval', help='How many epochs to go between saves/outputs', default=50000) parser.add_argument('--output_directory', help="Directory to save weights and images to.", default=f"data/output/") parser.add_argument('--dropout', help="Dropout rate for Discriminator", default=0.25) parser.add_argument('--bn_momentum', help="Batch normalization momentum for generator", default=0.80) parser.add_argument('--adam_lr', help="Learning rate for optimizer (both models)", default=0.0002) parser.add_argument('--adam_beta', help="Beta parameter for optimizer (both models)", default=0.5) args = parser.parse_args() dcgan = DCGAN(args.load_discriminator, args.load_generator, args.output_directory, literal_eval(args.image_size), float(args.dropout), float(args.bn_momentum), float(args.adam_lr), float(args.adam_beta)) if args.sample == -1: dcgan.train(epochs=int(args.epochs), image_path=args.data, batch_size=int(args.batch_size), save_interval=int(args.save_interval)) else: dcgan.generate_imgs(int(args.sample), literal_eval(args.sample_thresholds), "")
img_array = np.uint8(255*img_array)
conditional_block
brain-gan-parameter-search.py
""" Authors: Dan Mohler, Prad Ejner, Jordan Winkler Last updated: Sun Dec 15 15:22:17 EST 2019 Description: A generative adversarial network for generating images of brains """ # Deep Learning libraries for model from keras.models import Sequential, Model, load_model from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization, Reshape, Dense, Input, LeakyReLU, Dropout, Flatten, ZeroPadding2D from keras.optimizers import Adam # Operating system and computational libraries for OS interfacing import glob,tqdm from PIL import Image import numpy as np import os import argparse from ast import literal_eval # Update for image save import imageio imsave = imageio.imwrite # To stop using up all of the RAM when running the models import tensorflow as tf import keras config = tf.ConfigProto() config.gpu_options.allow_growth = True keras.backend.tensorflow_backend.set_session(tf.Session(config=config)) # DCGAN : Deep Convolutional Generative Adversarial Network # # Main paper describing the process # Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks # https://arxiv.org/pdf/1511.06434.pdf class DCGAN:
if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--load_generator', help='Path to existing generator weights file', default="data/models/generat.h5") parser.add_argument('--load_discriminator', help='Path to existing discriminator weights file', default="data/models/discrim.h5") parser.add_argument('--data', help='Path to directory of images of correct dimensions, using *.[filetype] (e.g. *.png) to reference images', default="128_128/*.tif") parser.add_argument('--sample', help='If given, will generate that many samples from existing model instead of training', default=-1) parser.add_argument('--sample_thresholds', help='The values between which a generated image must score from the discriminator', default="(0.0, 0.1)") parser.add_argument('--batch_size', help='Number of images to train on at once', default=24) parser.add_argument('--image_size', help='Size of images as tuple (height,width). Height and width must both be divisible by (2^5)', default="(128, 128)") parser.add_argument('--epochs', help='Number of epochs to train for', default=50000) parser.add_argument('--save_interval', help='How many epochs to go between saves/outputs', default=50000) parser.add_argument('--output_directory', help="Directory to save weights and images to.", default=f"data/output/") parser.add_argument('--dropout', help="Dropout rate for Discriminator", default=0.25) parser.add_argument('--bn_momentum', help="Batch normalization momentum for generator", default=0.80) parser.add_argument('--adam_lr', help="Learning rate for optimizer (both models)", default=0.0002) parser.add_argument('--adam_beta', help="Beta parameter for optimizer (both models)", default=0.5) args = parser.parse_args() dcgan = DCGAN(args.load_discriminator, args.load_generator, args.output_directory, literal_eval(args.image_size), float(args.dropout), float(args.bn_momentum), float(args.adam_lr), float(args.adam_beta)) if args.sample == -1: dcgan.train(epochs=int(args.epochs), 
image_path=args.data, batch_size=int(args.batch_size), save_interval=int(args.save_interval)) else: dcgan.generate_imgs(int(args.sample), literal_eval(args.sample_thresholds), "")
def __init__(self, discriminator_path, generator_path, output_directory, img_size, dropout, bn_momentum, adam_lr, adam_beta): self.img_size = img_size self.upsample_layers = 5 self.starting_filters = 64 self.kernel_size = 3 self.channels = 1 self.discriminator_path = discriminator_path self.generator_path = generator_path self.dropout = dropout self.bn_momentum = bn_momentum self.adam_lr = adam_lr self.adam_beta = adam_beta labels = f"/dropout={self.dropout} bn_momentum={self.bn_momentum} adam_lr={self.adam_lr} adam_beta={self.adam_beta}" self.output_directory = output_directory + labels def build_generator_model(self, noise_shape): model = Sequential() # This block of code can be a little daunting, but essentially it automatically calculates the required starting # array size that will be correctly upscaled to our desired image size. # # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following: # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model] # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64. 
model.add( Dense(self.starting_filters * (self.img_size[0] // (2 ** self.upsample_layers)) * (self.img_size[1] // (2 ** self.upsample_layers)), activation="relu", input_shape=noise_shape)) model.add(Reshape(((self.img_size[0] // (2 ** self.upsample_layers)), (self.img_size[1] // (2 ** self.upsample_layers)), self.starting_filters))) model.add(BatchNormalization(momentum=self.bn_momentum)) # Repeats the pattern of: stretching out data, mutating/shrinking data, and normalization model.add(UpSampling2D()) # 6x8 -> 12x16 model.add(Conv2D(1024, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 12x16 -> 24x32 model.add(Conv2D(512, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 24x32 -> 48x64 model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 48x64 -> 96x128 model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 96x128 -> 192x256 model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(32, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same")) model.add(Activation("tanh")) model.summary() return model def build_discriminator_model(self, img_shape): model = Sequential() model.add(Conv2D(32, kernel_size=self.kernel_size, strides=2, input_shape=img_shape, padding="same")) # 192x256 -> 96x128 model.add(LeakyReLU(alpha=0.2)) 
model.add(Dropout(self.dropout)) model.add(Conv2D(64, kernel_size=self.kernel_size, strides=2, padding="same")) # 96x128 -> 48x64 model.add(ZeroPadding2D(padding=((0, 1), (0, 1)))) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same")) # 48x64 -> 24x32 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(256, kernel_size=self.kernel_size, strides=1, padding="same")) # 24x32 -> 12x16 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Conv2D(512, kernel_size=self.kernel_size, strides=1, padding="same")) # 12x16 -> 6x8 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) model.summary() return model def build_generator(self): noise_shape = (100,) model = self.build_generator_model(noise_shape) noise = Input(shape=noise_shape) img = model(noise) return Model(noise, img) def build_discriminator(self): img_shape = (self.img_size[0], self.img_size[1], self.channels) model = self.build_discriminator_model(img_shape) img = Input(shape=img_shape) validity = model(img) return Model(img, validity) def build_gan(self): optimizer = Adam(self.adam_lr, self.adam_beta) # See if the specified model paths exist, if they don't then we start training new models if os.path.exists(self.discriminator_path) and os.path.exists(self.generator_path): self.discriminator = load_model(self.discriminator_path) self.generator = load_model(self.generator_path) print("Loaded models...") else: self.discriminator = self.build_discriminator() self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) self.generator = self.build_generator() self.generator.compile(loss='binary_crossentropy', optimizer=optimizer) # These next few lines setup the training for the GAN 
model z = Input(shape=(100,)) img = self.generator(z) self.discriminator.trainable = False valid = self.discriminator(img) self.combined = Model(z, valid) self.combined.compile(loss='binary_crossentropy', optimizer=optimizer) def load_imgs(self, image_path): X_train = [] for i in glob.glob(image_path): img = Image.open(i) img = np.asarray(img) img = img.reshape(self.img_size[0], self.img_size[1], self.channels) X_train.append(img) return np.asarray(X_train) def pix_array_convert(self, img): img_array = np.asarray(img) if np.max(img_array) > 1 or np.min(img_array) < 0: print("Picture array outside expected value") exit() else: img_array = np.uint8(255*img_array) return(img_array) def train(self, epochs, image_path, batch_size=32, save_interval=50): self.build_gan() X_train = self.load_imgs(image_path) print("Training Data Shape: ", X_train.shape) # Rescale images from -1 to 1 X_train = (X_train.astype(np.float32) - 127.5) / 127.5 half_batch = batch_size // 2 # Training Loop prog_bar = tqdm.tqdm(range(epochs)) prog_bar.ncols = 20 for epoch in prog_bar: # Train Generator noise = np.random.normal(0, 1, (batch_size, 100)) g_loss = self.combined.train_on_batch(noise, np.ones((batch_size, 1))) # Train Discriminator idx = np.random.randint(0, X_train.shape[0], half_batch) imgs = X_train[idx] #imgs = np.expand_dims(imgs, axis=4) # Added by Jordan to get tif dimensions to fit # Sample noise and generate a half batch of new images noise = np.random.normal(0, 1, (half_batch, 100)) gen_imgs = self.generator.predict(noise) # Train the discriminator (real classified as ones and generated as zeros) d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1))) d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1))) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # Print progress prog_bar.set_description(("D loss: " + format(d_loss[0], "^-06.3f") + " | D Accuracy: " + format(d_loss[1], "^-06.3f") +" | G loss: " + format(g_loss, 
"^-06.3f"))) #prog_bar.set_description(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") #print(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") self.save_imgs(save_interval) save_path = self.output_directory + "/models" if not os.path.exists(save_path): os.makedirs(save_path) self.discriminator.save(save_path + f"/{epoch} discrim.h5") self.generator.save(save_path + f"/{epoch} generat.h5") def gene_imgs(self, count): " Generate images from the currently loaded model" noise = np.random.normal(0, 1, (count, 100)) return self.generator.predict(noise) def save_imgs(self, epoch): " Generates n images from the model, and saves them" n = 1019 imgs = self.gene_imgs(n) imgs = 0.5 * imgs + 0.5 for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{epoch}_{i}.png", self.pix_array_convert(img_array)) def generate_imgs(self, count, threshold, modifier): """ Generates (count) images from the model ensuring the discriminator scores them between the threshold values and saves them """ self.build_gan() imgs = [] for i in range(count): score = [0] while not(threshold[0] < score[0] < threshold[1]): img = self.gene_imgs(1) score = self.discriminator.predict(img) print("Image found: ", score[0]) imgs.append(img) imgs = np.asarray(imgs).squeeze() imgs = 0.5 * imgs + 0.5 print(imgs.shape) for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated_{threshold[0]}_{threshold[1]}" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{modifier}_{i}.png", self.pix_array_convert(img_array))
identifier_body
brain-gan-parameter-search.py
""" Authors: Dan Mohler, Prad Ejner, Jordan Winkler Last updated: Sun Dec 15 15:22:17 EST 2019 Description: A generative adversarial network for generating images of brains """ # Deep Learning libraries for model from keras.models import Sequential, Model, load_model from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization, Reshape, Dense, Input, LeakyReLU, Dropout, Flatten, ZeroPadding2D from keras.optimizers import Adam # Operating system and computational libraries for OS interfacing import glob,tqdm from PIL import Image import numpy as np import os import argparse from ast import literal_eval # Update for image save import imageio imsave = imageio.imwrite # To stop using up all of the RAM when running the models import tensorflow as tf import keras config = tf.ConfigProto() config.gpu_options.allow_growth = True keras.backend.tensorflow_backend.set_session(tf.Session(config=config)) # DCGAN : Deep Convolutional Generative Adversarial Network # # Main paper describing the process # Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks # https://arxiv.org/pdf/1511.06434.pdf class DCGAN: def
(self, discriminator_path, generator_path, output_directory, img_size, dropout, bn_momentum, adam_lr, adam_beta): self.img_size = img_size self.upsample_layers = 5 self.starting_filters = 64 self.kernel_size = 3 self.channels = 1 self.discriminator_path = discriminator_path self.generator_path = generator_path self.dropout = dropout self.bn_momentum = bn_momentum self.adam_lr = adam_lr self.adam_beta = adam_beta labels = f"/dropout={self.dropout} bn_momentum={self.bn_momentum} adam_lr={self.adam_lr} adam_beta={self.adam_beta}" self.output_directory = output_directory + labels def build_generator_model(self, noise_shape): model = Sequential() # This block of code can be a little daunting, but essentially it automatically calculates the required starting # array size that will be correctly upscaled to our desired image size. # # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following: # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model] # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64. 
model.add( Dense(self.starting_filters * (self.img_size[0] // (2 ** self.upsample_layers)) * (self.img_size[1] // (2 ** self.upsample_layers)), activation="relu", input_shape=noise_shape)) model.add(Reshape(((self.img_size[0] // (2 ** self.upsample_layers)), (self.img_size[1] // (2 ** self.upsample_layers)), self.starting_filters))) model.add(BatchNormalization(momentum=self.bn_momentum)) # Repeats the pattern of: stretching out data, mutating/shrinking data, and normalization model.add(UpSampling2D()) # 6x8 -> 12x16 model.add(Conv2D(1024, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 12x16 -> 24x32 model.add(Conv2D(512, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 24x32 -> 48x64 model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 48x64 -> 96x128 model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 96x128 -> 192x256 model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(32, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same")) model.add(Activation("tanh")) model.summary() return model def build_discriminator_model(self, img_shape): model = Sequential() model.add(Conv2D(32, kernel_size=self.kernel_size, strides=2, input_shape=img_shape, padding="same")) # 192x256 -> 96x128 model.add(LeakyReLU(alpha=0.2)) 
model.add(Dropout(self.dropout)) model.add(Conv2D(64, kernel_size=self.kernel_size, strides=2, padding="same")) # 96x128 -> 48x64 model.add(ZeroPadding2D(padding=((0, 1), (0, 1)))) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same")) # 48x64 -> 24x32 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(256, kernel_size=self.kernel_size, strides=1, padding="same")) # 24x32 -> 12x16 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Conv2D(512, kernel_size=self.kernel_size, strides=1, padding="same")) # 12x16 -> 6x8 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) model.summary() return model def build_generator(self): noise_shape = (100,) model = self.build_generator_model(noise_shape) noise = Input(shape=noise_shape) img = model(noise) return Model(noise, img) def build_discriminator(self): img_shape = (self.img_size[0], self.img_size[1], self.channels) model = self.build_discriminator_model(img_shape) img = Input(shape=img_shape) validity = model(img) return Model(img, validity) def build_gan(self): optimizer = Adam(self.adam_lr, self.adam_beta) # See if the specified model paths exist, if they don't then we start training new models if os.path.exists(self.discriminator_path) and os.path.exists(self.generator_path): self.discriminator = load_model(self.discriminator_path) self.generator = load_model(self.generator_path) print("Loaded models...") else: self.discriminator = self.build_discriminator() self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) self.generator = self.build_generator() self.generator.compile(loss='binary_crossentropy', optimizer=optimizer) # These next few lines setup the training for the GAN 
model z = Input(shape=(100,)) img = self.generator(z) self.discriminator.trainable = False valid = self.discriminator(img) self.combined = Model(z, valid) self.combined.compile(loss='binary_crossentropy', optimizer=optimizer) def load_imgs(self, image_path): X_train = [] for i in glob.glob(image_path): img = Image.open(i) img = np.asarray(img) img = img.reshape(self.img_size[0], self.img_size[1], self.channels) X_train.append(img) return np.asarray(X_train) def pix_array_convert(self, img): img_array = np.asarray(img) if np.max(img_array) > 1 or np.min(img_array) < 0: print("Picture array outside expected value") exit() else: img_array = np.uint8(255*img_array) return(img_array) def train(self, epochs, image_path, batch_size=32, save_interval=50): self.build_gan() X_train = self.load_imgs(image_path) print("Training Data Shape: ", X_train.shape) # Rescale images from -1 to 1 X_train = (X_train.astype(np.float32) - 127.5) / 127.5 half_batch = batch_size // 2 # Training Loop prog_bar = tqdm.tqdm(range(epochs)) prog_bar.ncols = 20 for epoch in prog_bar: # Train Generator noise = np.random.normal(0, 1, (batch_size, 100)) g_loss = self.combined.train_on_batch(noise, np.ones((batch_size, 1))) # Train Discriminator idx = np.random.randint(0, X_train.shape[0], half_batch) imgs = X_train[idx] #imgs = np.expand_dims(imgs, axis=4) # Added by Jordan to get tif dimensions to fit # Sample noise and generate a half batch of new images noise = np.random.normal(0, 1, (half_batch, 100)) gen_imgs = self.generator.predict(noise) # Train the discriminator (real classified as ones and generated as zeros) d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1))) d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1))) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # Print progress prog_bar.set_description(("D loss: " + format(d_loss[0], "^-06.3f") + " | D Accuracy: " + format(d_loss[1], "^-06.3f") +" | G loss: " + format(g_loss, 
"^-06.3f"))) #prog_bar.set_description(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") #print(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") self.save_imgs(save_interval) save_path = self.output_directory + "/models" if not os.path.exists(save_path): os.makedirs(save_path) self.discriminator.save(save_path + f"/{epoch} discrim.h5") self.generator.save(save_path + f"/{epoch} generat.h5") def gene_imgs(self, count): " Generate images from the currently loaded model" noise = np.random.normal(0, 1, (count, 100)) return self.generator.predict(noise) def save_imgs(self, epoch): " Generates n images from the model, and saves them" n = 1019 imgs = self.gene_imgs(n) imgs = 0.5 * imgs + 0.5 for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{epoch}_{i}.png", self.pix_array_convert(img_array)) def generate_imgs(self, count, threshold, modifier): """ Generates (count) images from the model ensuring the discriminator scores them between the threshold values and saves them """ self.build_gan() imgs = [] for i in range(count): score = [0] while not(threshold[0] < score[0] < threshold[1]): img = self.gene_imgs(1) score = self.discriminator.predict(img) print("Image found: ", score[0]) imgs.append(img) imgs = np.asarray(imgs).squeeze() imgs = 0.5 * imgs + 0.5 print(imgs.shape) for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated_{threshold[0]}_{threshold[1]}" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{modifier}_{i}.png", self.pix_array_convert(img_array)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--load_generator', help='Path to existing generator weights file', default="data/models/generat.h5") parser.add_argument('--load_discriminator', help='Path to existing discriminator weights file', default="data/models/discrim.h5") 
parser.add_argument('--data', help='Path to directory of images of correct dimensions, using *.[filetype] (e.g. *.png) to reference images', default="128_128/*.tif") parser.add_argument('--sample', help='If given, will generate that many samples from existing model instead of training', default=-1) parser.add_argument('--sample_thresholds', help='The values between which a generated image must score from the discriminator', default="(0.0, 0.1)") parser.add_argument('--batch_size', help='Number of images to train on at once', default=24) parser.add_argument('--image_size', help='Size of images as tuple (height,width). Height and width must both be divisible by (2^5)', default="(128, 128)") parser.add_argument('--epochs', help='Number of epochs to train for', default=50000) parser.add_argument('--save_interval', help='How many epochs to go between saves/outputs', default=50000) parser.add_argument('--output_directory', help="Directory to save weights and images to.", default=f"data/output/") parser.add_argument('--dropout', help="Dropout rate for Discriminator", default=0.25) parser.add_argument('--bn_momentum', help="Batch normalization momentum for generator", default=0.80) parser.add_argument('--adam_lr', help="Learning rate for optimizer (both models)", default=0.0002) parser.add_argument('--adam_beta', help="Beta parameter for optimizer (both models)", default=0.5) args = parser.parse_args() dcgan = DCGAN(args.load_discriminator, args.load_generator, args.output_directory, literal_eval(args.image_size), float(args.dropout), float(args.bn_momentum), float(args.adam_lr), float(args.adam_beta)) if args.sample == -1: dcgan.train(epochs=int(args.epochs), image_path=args.data, batch_size=int(args.batch_size), save_interval=int(args.save_interval)) else: dcgan.generate_imgs(int(args.sample), literal_eval(args.sample_thresholds), "")
__init__
identifier_name
brain-gan-parameter-search.py
""" Authors: Dan Mohler, Prad Ejner, Jordan Winkler Last updated: Sun Dec 15 15:22:17 EST 2019 Description: A generative adversarial network for generating images of brains """ # Deep Learning libraries for model from keras.models import Sequential, Model, load_model from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization, Reshape, Dense, Input, LeakyReLU, Dropout, Flatten, ZeroPadding2D from keras.optimizers import Adam # Operating system and computational libraries for OS interfacing import glob,tqdm from PIL import Image import numpy as np import os import argparse from ast import literal_eval # Update for image save import imageio imsave = imageio.imwrite # To stop using up all of the RAM when running the models import tensorflow as tf import keras config = tf.ConfigProto() config.gpu_options.allow_growth = True keras.backend.tensorflow_backend.set_session(tf.Session(config=config)) # DCGAN : Deep Convolutional Generative Adversarial Network # # Main paper describing the process # Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks # https://arxiv.org/pdf/1511.06434.pdf class DCGAN: def __init__(self, discriminator_path, generator_path, output_directory, img_size, dropout, bn_momentum, adam_lr, adam_beta): self.img_size = img_size self.upsample_layers = 5 self.starting_filters = 64 self.kernel_size = 3 self.channels = 1 self.discriminator_path = discriminator_path self.generator_path = generator_path self.dropout = dropout self.bn_momentum = bn_momentum self.adam_lr = adam_lr self.adam_beta = adam_beta labels = f"/dropout={self.dropout} bn_momentum={self.bn_momentum} adam_lr={self.adam_lr} adam_beta={self.adam_beta}" self.output_directory = output_directory + labels def build_generator_model(self, noise_shape): model = Sequential() # This block of code can be a little daunting, but essentially it automatically calculates the required starting # array size that will be correctly upscaled to our 
desired image size. # # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following: # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model] # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64. model.add( Dense(self.starting_filters * (self.img_size[0] // (2 ** self.upsample_layers)) * (self.img_size[1] // (2 ** self.upsample_layers)), activation="relu", input_shape=noise_shape)) model.add(Reshape(((self.img_size[0] // (2 ** self.upsample_layers)), (self.img_size[1] // (2 ** self.upsample_layers)), self.starting_filters))) model.add(BatchNormalization(momentum=self.bn_momentum)) # Repeats the pattern of: stretching out data, mutating/shrinking data, and normalization model.add(UpSampling2D()) # 6x8 -> 12x16 model.add(Conv2D(1024, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 12x16 -> 24x32 model.add(Conv2D(512, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 24x32 -> 48x64 model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 48x64 -> 96x128 model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(UpSampling2D()) # 96x128 -> 192x256 model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(32, kernel_size=self.kernel_size, padding="same")) model.add(Activation("relu")) 
model.add(BatchNormalization(momentum=self.bn_momentum)) model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same")) model.add(Activation("tanh")) model.summary() return model def build_discriminator_model(self, img_shape): model = Sequential() model.add(Conv2D(32, kernel_size=self.kernel_size, strides=2, input_shape=img_shape, padding="same")) # 192x256 -> 96x128 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Conv2D(64, kernel_size=self.kernel_size, strides=2, padding="same")) # 96x128 -> 48x64 model.add(ZeroPadding2D(padding=((0, 1), (0, 1)))) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same")) # 48x64 -> 24x32 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(256, kernel_size=self.kernel_size, strides=1, padding="same")) # 24x32 -> 12x16 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Conv2D(512, kernel_size=self.kernel_size, strides=1, padding="same")) # 12x16 -> 6x8 model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(self.dropout)) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) model.summary() return model def build_generator(self): noise_shape = (100,) model = self.build_generator_model(noise_shape) noise = Input(shape=noise_shape) img = model(noise) return Model(noise, img) def build_discriminator(self): img_shape = (self.img_size[0], self.img_size[1], self.channels) model = self.build_discriminator_model(img_shape) img = Input(shape=img_shape) validity = model(img) return Model(img, validity) def build_gan(self): optimizer = Adam(self.adam_lr, self.adam_beta) # See if the specified model paths exist, if they don't then we start training new models if os.path.exists(self.discriminator_path) and os.path.exists(self.generator_path): self.discriminator = 
load_model(self.discriminator_path) self.generator = load_model(self.generator_path) print("Loaded models...") else: self.discriminator = self.build_discriminator() self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) self.generator = self.build_generator() self.generator.compile(loss='binary_crossentropy', optimizer=optimizer) # These next few lines setup the training for the GAN model z = Input(shape=(100,)) img = self.generator(z) self.discriminator.trainable = False valid = self.discriminator(img) self.combined = Model(z, valid) self.combined.compile(loss='binary_crossentropy', optimizer=optimizer) def load_imgs(self, image_path): X_train = [] for i in glob.glob(image_path): img = Image.open(i) img = np.asarray(img) img = img.reshape(self.img_size[0], self.img_size[1], self.channels) X_train.append(img) return np.asarray(X_train) def pix_array_convert(self, img): img_array = np.asarray(img) if np.max(img_array) > 1 or np.min(img_array) < 0: print("Picture array outside expected value") exit() else: img_array = np.uint8(255*img_array) return(img_array) def train(self, epochs, image_path, batch_size=32, save_interval=50): self.build_gan() X_train = self.load_imgs(image_path) print("Training Data Shape: ", X_train.shape) # Rescale images from -1 to 1 X_train = (X_train.astype(np.float32) - 127.5) / 127.5 half_batch = batch_size // 2 # Training Loop prog_bar = tqdm.tqdm(range(epochs)) prog_bar.ncols = 20 for epoch in prog_bar: # Train Generator noise = np.random.normal(0, 1, (batch_size, 100)) g_loss = self.combined.train_on_batch(noise, np.ones((batch_size, 1))) # Train Discriminator idx = np.random.randint(0, X_train.shape[0], half_batch) imgs = X_train[idx] #imgs = np.expand_dims(imgs, axis=4) # Added by Jordan to get tif dimensions to fit # Sample noise and generate a half batch of new images noise = np.random.normal(0, 1, (half_batch, 100)) gen_imgs = self.generator.predict(noise)
# Print progress prog_bar.set_description(("D loss: " + format(d_loss[0], "^-06.3f") + " | D Accuracy: " + format(d_loss[1], "^-06.3f") +" | G loss: " + format(g_loss, "^-06.3f"))) #prog_bar.set_description(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") #print(f"{epoch} [D loss: {d_loss[0]} | D Accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]") self.save_imgs(save_interval) save_path = self.output_directory + "/models" if not os.path.exists(save_path): os.makedirs(save_path) self.discriminator.save(save_path + f"/{epoch} discrim.h5") self.generator.save(save_path + f"/{epoch} generat.h5") def gene_imgs(self, count): " Generate images from the currently loaded model" noise = np.random.normal(0, 1, (count, 100)) return self.generator.predict(noise) def save_imgs(self, epoch): " Generates n images from the model, and saves them" n = 1019 imgs = self.gene_imgs(n) imgs = 0.5 * imgs + 0.5 for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{epoch}_{i}.png", self.pix_array_convert(img_array)) def generate_imgs(self, count, threshold, modifier): """ Generates (count) images from the model ensuring the discriminator scores them between the threshold values and saves them """ self.build_gan() imgs = [] for i in range(count): score = [0] while not(threshold[0] < score[0] < threshold[1]): img = self.gene_imgs(1) score = self.discriminator.predict(img) print("Image found: ", score[0]) imgs.append(img) imgs = np.asarray(imgs).squeeze() imgs = 0.5 * imgs + 0.5 print(imgs.shape) for i, img_array in enumerate(imgs): path = f"{self.output_directory}/generated_{threshold[0]}_{threshold[1]}" if not os.path.exists(path): os.makedirs(path) imsave(path + f"/{modifier}_{i}.png", self.pix_array_convert(img_array)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--load_generator', help='Path to existing generator weights file', 
default="data/models/generat.h5") parser.add_argument('--load_discriminator', help='Path to existing discriminator weights file', default="data/models/discrim.h5") parser.add_argument('--data', help='Path to directory of images of correct dimensions, using *.[filetype] (e.g. *.png) to reference images', default="128_128/*.tif") parser.add_argument('--sample', help='If given, will generate that many samples from existing model instead of training', default=-1) parser.add_argument('--sample_thresholds', help='The values between which a generated image must score from the discriminator', default="(0.0, 0.1)") parser.add_argument('--batch_size', help='Number of images to train on at once', default=24) parser.add_argument('--image_size', help='Size of images as tuple (height,width). Height and width must both be divisible by (2^5)', default="(128, 128)") parser.add_argument('--epochs', help='Number of epochs to train for', default=50000) parser.add_argument('--save_interval', help='How many epochs to go between saves/outputs', default=50000) parser.add_argument('--output_directory', help="Directory to save weights and images to.", default=f"data/output/") parser.add_argument('--dropout', help="Dropout rate for Discriminator", default=0.25) parser.add_argument('--bn_momentum', help="Batch normalization momentum for generator", default=0.80) parser.add_argument('--adam_lr', help="Learning rate for optimizer (both models)", default=0.0002) parser.add_argument('--adam_beta', help="Beta parameter for optimizer (both models)", default=0.5) args = parser.parse_args() dcgan = DCGAN(args.load_discriminator, args.load_generator, args.output_directory, literal_eval(args.image_size), float(args.dropout), float(args.bn_momentum), float(args.adam_lr), float(args.adam_beta)) if args.sample == -1: dcgan.train(epochs=int(args.epochs), image_path=args.data, batch_size=int(args.batch_size), save_interval=int(args.save_interval)) else: dcgan.generate_imgs(int(args.sample), 
literal_eval(args.sample_thresholds), "")
# Train the discriminator (real classified as ones and generated as zeros) d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1))) d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1))) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
random_line_split
revaultd.rs
use common::config::{config_folder_path, BitcoindConfig, Config, ConfigError}; use std::{ collections::HashMap, convert::TryFrom, fmt, fs, io::{self, Read, Write}, net::SocketAddr, path::PathBuf, str::FromStr, time, vec::Vec, }; use revault_net::{ noise::{PublicKey as NoisePubKey, SecretKey as NoisePrivKey}, sodiumoxide::{self, crypto::scalarmult::curve25519}, }; use revault_tx::{ bitcoin::{ secp256k1, util::bip32::{ChildNumber, ExtendedPubKey}, Address, BlockHash, PublicKey as BitcoinPublicKey, Script, TxOut, }, miniscript::descriptor::{DescriptorPublicKey, DescriptorTrait}, scripts::{ CpfpDescriptor, DepositDescriptor, DerivedCpfpDescriptor, DerivedDepositDescriptor, DerivedUnvaultDescriptor, EmergencyAddress, UnvaultDescriptor, }, transactions::{ CancelTransaction, DepositTransaction, EmergencyTransaction, UnvaultEmergencyTransaction, UnvaultTransaction, }, }; /// The status of a [Vault], depends both on the block chain and the set of pre-signed /// transactions #[derive(Debug, Clone, Copy, PartialEq)] pub enum VaultStatus { /// The deposit transaction has less than 6 confirmations Unconfirmed, /// The deposit transaction has more than 6 confirmations Funded, /// The revocation transactions are signed by us Securing, /// The revocation transactions are signed by everyone Secured, /// The unvault transaction is signed (implies that the second emergency and the /// cancel transaction are signed). Activating, /// The unvault transaction is signed (implies that the second emergency and the /// cancel transaction are signed). 
Active, /// The unvault transaction has been broadcast Unvaulting, /// The unvault transaction is confirmed Unvaulted, /// The cancel transaction has been broadcast Canceling, /// The cancel transaction is confirmed Canceled, /// The first emergency transactions has been broadcast EmergencyVaulting, /// The first emergency transactions is confirmed EmergencyVaulted, /// The unvault emergency transactions has been broadcast UnvaultEmergencyVaulting, /// The unvault emergency transactions is confirmed UnvaultEmergencyVaulted, /// The spend transaction has been broadcast Spending, // TODO: At what depth do we forget it ? /// The spend transaction is confirmed Spent, } impl TryFrom<u32> for VaultStatus { type Error = (); fn try_from(n: u32) -> Result<Self, Self::Error> { match n { 0 => Ok(Self::Unconfirmed), 1 => Ok(Self::Funded), 2 => Ok(Self::Securing), 3 => Ok(Self::Secured), 4 => Ok(Self::Activating), 5 => Ok(Self::Active), 6 => Ok(Self::Unvaulting), 7 => Ok(Self::Unvaulted), 8 => Ok(Self::Canceling), 9 => Ok(Self::Canceled), 10 => Ok(Self::EmergencyVaulting), 11 => Ok(Self::EmergencyVaulted), 12 => Ok(Self::UnvaultEmergencyVaulting), 13 => Ok(Self::UnvaultEmergencyVaulted), 14 => Ok(Self::Spending), 15 => Ok(Self::Spent), _ => Err(()), } } } impl FromStr for VaultStatus { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "unconfirmed" => Ok(Self::Unconfirmed), "funded" => Ok(Self::Funded), "securing" => Ok(Self::Securing), "secured" => Ok(Self::Secured), "activating" => Ok(Self::Activating), "active" => Ok(Self::Active), "unvaulting" => Ok(Self::Unvaulting), "unvaulted" => Ok(Self::Unvaulted), "canceling" => Ok(Self::Canceling), "canceled" => Ok(Self::Canceled), "emergencyvaulting" => Ok(Self::EmergencyVaulting), "emergencyvaulted" => Ok(Self::EmergencyVaulted), "unvaultemergencyvaulting" => Ok(Self::UnvaultEmergencyVaulting), "unvaultemergencyvaulted" => Ok(Self::UnvaultEmergencyVaulted), "spending" => Ok(Self::Spending), "spent" => 
Ok(Self::Spent), _ => Err(()), } } } impl fmt::Display for VaultStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", match *self { Self::Unconfirmed => "unconfirmed", Self::Funded => "funded", Self::Securing => "securing", Self::Secured => "secured", Self::Activating => "activating", Self::Active => "active", Self::Unvaulting => "unvaulting", Self::Unvaulted => "unvaulted", Self::Canceling => "canceling", Self::Canceled => "canceled", Self::EmergencyVaulting => "emergencyvaulting", Self::EmergencyVaulted => "emergencyvaulted", Self::UnvaultEmergencyVaulting => "unvaultemergencyvaulting", Self::UnvaultEmergencyVaulted => "unvaultemergencyvaulted", Self::Spending => "spending", Self::Spent => "spent", } ) } } // An error related to the initialization of communication keys #[derive(Debug)] enum KeyError { ReadingKey(io::Error), WritingKey(io::Error), } impl fmt::Display for KeyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::ReadingKey(e) => write!(f, "Error reading Noise key: '{}'", e), Self::WritingKey(e) => write!(f, "Error writing Noise key: '{}'", e), } } } impl std::error::Error for KeyError {} // The communication keys are (for now) hot, so we just create it ourselves on first run. fn read_or_create_noise_key(secret_file: PathBuf) -> Result<NoisePrivKey, KeyError> { let mut noise_secret = NoisePrivKey([0; 32]); if !secret_file.as_path().exists() { log::info!( "No Noise private key at '{:?}', generating a new one", secret_file ); noise_secret = sodiumoxide::crypto::box_::gen_keypair().1; // We create it in read-only but open it in write only. 
let mut options = fs::OpenOptions::new(); options = options.write(true).create_new(true).clone(); // FIXME: handle Windows ACLs #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; options = options.mode(0o400).clone(); } let mut fd = options.open(secret_file).map_err(KeyError::WritingKey)?; fd.write_all(&noise_secret.as_ref()) .map_err(KeyError::WritingKey)?; } else { let mut noise_secret_fd = fs::File::open(secret_file).map_err(KeyError::ReadingKey)?; noise_secret_fd .read_exact(&mut noise_secret.0) .map_err(KeyError::ReadingKey)?; } // TODO: have a decent memory management and mlock() the key assert!(noise_secret.0 != [0; 32]); Ok(noise_secret) } /// A vault is defined as a confirmed utxo paying to the Vault Descriptor for which /// we have a set of pre-signed transaction (emergency, cancel, unvault). /// Depending on its status we may not yet be in possession of part -or the entirety- /// of the pre-signed transactions. /// Likewise, depending on our role (manager or stakeholder), we may not have the /// emergency transactions. pub struct _Vault { pub deposit_txo: TxOut, pub status: VaultStatus, pub vault_tx: Option<DepositTransaction>, pub emergency_tx: Option<EmergencyTransaction>, pub unvault_tx: Option<UnvaultTransaction>, pub cancel_tx: Option<CancelTransaction>, pub unvault_emergency_tx: Option<UnvaultEmergencyTransaction>, } #[derive(Debug, PartialEq, Copy, Clone)] pub struct BlockchainTip { pub height: u32, pub hash: BlockHash, } /// Our global state pub struct RevaultD { // Bitcoind stuff /// Everything we need to know to talk to bitcoind pub bitcoind_config: BitcoindConfig, /// Last block we heard about pub tip: Option<BlockchainTip>, /// Minimum confirmations before considering a deposit as mature pub min_conf: u32, // Scripts stuff /// Who am i, and where am i in all this mess ? 
pub our_stk_xpub: Option<ExtendedPubKey>, pub our_man_xpub: Option<ExtendedPubKey>, /// The miniscript descriptor of vault's outputs scripts pub deposit_descriptor: DepositDescriptor, /// The miniscript descriptor of unvault's outputs scripts pub unvault_descriptor: UnvaultDescriptor, /// The miniscript descriptor of CPFP output scripts (in unvault and spend transaction) pub cpfp_descriptor: CpfpDescriptor, /// The Emergency address, only available if we are a stakeholder pub emergency_address: Option<EmergencyAddress>, /// We don't make an enormous deal of address reuse (we cancel to the same keys), /// however we at least try to generate new addresses once they're used. // FIXME: think more about desync reconciliation.. pub current_unused_index: ChildNumber, /// The secp context required by the xpub one.. We'll eventually use it to verify keys. pub secp_ctx: secp256k1::Secp256k1<secp256k1::VerifyOnly>, /// The locktime to use on all created transaction. Always 0 for now. pub lock_time: u32, // Network stuff /// The static private key we use to establish connections to servers. We reuse it, but Trevor /// said it's fine! https://github.com/noiseprotocol/noise_spec/blob/master/noise.md#14-security-considerations pub noise_secret: NoisePrivKey, /// The ip:port the coordinator is listening on. TODO: Tor pub coordinator_host: SocketAddr, /// The static public key to enact the Noise channel with the Coordinator pub coordinator_noisekey: NoisePubKey, pub coordinator_poll_interval: time::Duration, /// The ip:port (TODO: Tor) and Noise public key of each cosigning server, only set if we are /// a manager. pub cosigs: Option<Vec<(SocketAddr, NoisePubKey)>>, // 'Wallet' stuff /// A map from a scriptPubKey to a derivation index. Used to retrieve the actual public /// keys used to generate a script from bitcoind until we can pass it xpub-expressed /// Miniscript descriptors. 
pub derivation_index_map: HashMap<Script, ChildNumber>, /// The id of the wallet used in the db pub wallet_id: Option<u32>, // Misc stuff /// We store all our data in one place, that's here. pub data_dir: PathBuf, /// Should we run as a daemon? (Default: yes) pub daemon: bool, // TODO: servers connection stuff } fn create_datadir(datadir_path: &PathBuf) -> Result<(), std::io::Error> { #[cfg(unix)] return { use fs::DirBuilder; use std::os::unix::fs::DirBuilderExt; let mut builder = DirBuilder::new(); builder.mode(0o700).recursive(true).create(datadir_path) }; #[cfg(not(unix))] return { // FIXME: make Windows secure (again?) fs::create_dir_all(datadir_path) }; } impl RevaultD { /// Creates our global state by consuming the static configuration pub fn from_config(config: Config) -> Result<RevaultD, Box<dyn std::error::Error>> { let our_man_xpub = config.manager_config.as_ref().map(|x| x.xpub); let our_stk_xpub = config.stakeholder_config.as_ref().map(|x| x.xpub); // Config should have checked that! 
assert!(our_man_xpub.is_some() || our_stk_xpub.is_some()); let deposit_descriptor = config.scripts_config.deposit_descriptor; let unvault_descriptor = config.scripts_config.unvault_descriptor; let cpfp_descriptor = config.scripts_config.cpfp_descriptor; let emergency_address = config.stakeholder_config.map(|x| x.emergency_address); let mut data_dir = config.data_dir.unwrap_or(config_folder_path()?); data_dir.push(config.bitcoind_config.network.to_string()); if !data_dir.as_path().exists() { if let Err(e) = create_datadir(&data_dir) { return Err(Box::from(ConfigError(format!( "Could not create data dir '{:?}': {}.", data_dir, e.to_string() )))); } } data_dir = fs::canonicalize(data_dir)?; let data_dir_str = data_dir .to_str() .expect("Impossible: the datadir path is valid unicode"); let noise_secret_file = [data_dir_str, "noise_secret"].iter().collect(); let noise_secret = read_or_create_noise_key(noise_secret_file)?; // TODO: support hidden services let coordinator_host = SocketAddr::from_str(&config.coordinator_host)?; let coordinator_noisekey = config.coordinator_noise_key; let coordinator_poll_interval = config.coordinator_poll_seconds; let cosigs = config.manager_config.map(|config| { config .cosigners .into_iter() .map(|config| (config.host, config.noise_key)) .collect() }); let daemon = !matches!(config.daemon, Some(false)); let secp_ctx = secp256k1::Secp256k1::verification_only(); Ok(RevaultD { our_stk_xpub, our_man_xpub, deposit_descriptor, unvault_descriptor, cpfp_descriptor, secp_ctx, data_dir, daemon, emergency_address, noise_secret, coordinator_host, coordinator_noisekey, coordinator_poll_interval, cosigs, lock_time: 0, min_conf: config.min_conf, bitcoind_config: config.bitcoind_config, tip: None, // Will be updated by the database current_unused_index: ChildNumber::from(0), // FIXME: we don't need SipHash for those, use a faster alternative derivation_index_map: HashMap::new(), // Will be updated soon (:tm:) wallet_id: None, }) } fn 
file_from_datadir(&self, file_name: &str) -> PathBuf { let data_dir_str = self .data_dir .to_str() .expect("Impossible: the datadir path is valid unicode"); [data_dir_str, file_name].iter().collect() } /// Our Noise static public key pub fn noise_pubkey(&self) -> NoisePubKey { let scalar = curve25519::Scalar(self.noise_secret.0); NoisePubKey(curve25519::scalarmult_base(&scalar).0) } pub fn vault_address(&self, child_number: ChildNumber) -> Address { self.deposit_descriptor .derive(child_number, &self.secp_ctx) .inner() .address(self.bitcoind_config.network) .expect("deposit_descriptor is a wsh") } pub fn unvault_address(&self, child_number: ChildNumber) -> Address { self.unvault_descriptor .derive(child_number, &self.secp_ctx) .inner() .address(self.bitcoind_config.network) .expect("unvault_descriptor is a wsh") } pub fn gap_limit(&self) -> u32 { 100 } pub fn watchonly_wallet_name(&self) -> Option<String> { self.wallet_id .map(|ref id| format!("revaultd-watchonly-wallet-{}", id)) } pub fn log_file(&self) -> PathBuf { self.file_from_datadir("log") } pub fn pid_file(&self) -> PathBuf { self.file_from_datadir("revaultd.pid") } pub fn db_file(&self) -> PathBuf { self.file_from_datadir("revaultd.sqlite3") } pub fn watchonly_wallet_file(&self) -> Option<String> { self.watchonly_wallet_name().map(|ref name| { self.file_from_datadir(name) .to_str() .expect("Valid utf-8") .to_string() }) } pub fn rpc_socket_file(&self) -> PathBuf { self.file_from_datadir("revaultd_rpc") } pub fn is_stakeholder(&self) -> bool { self.our_stk_xpub.is_some() } pub fn is_manager(&self) -> bool { self.our_man_xpub.is_some() } pub fn
(&self) -> Address { self.vault_address(self.current_unused_index) } pub fn last_deposit_address(&self) -> Address { let raw_index: u32 = self.current_unused_index.into(); // FIXME: this should fail instead of creating a hardened index self.vault_address(ChildNumber::from(raw_index + self.gap_limit())) } pub fn last_unvault_address(&self) -> Address { let raw_index: u32 = self.current_unused_index.into(); // FIXME: this should fail instead of creating a hardened index self.unvault_address(ChildNumber::from(raw_index + self.gap_limit())) } /// All deposit addresses as strings up to the gap limit (100) pub fn all_deposit_addresses(&mut self) -> Vec<String> { self.derivation_index_map .keys() .map(|s| { Address::from_script(s, self.bitcoind_config.network) .expect("Created from P2WSH address") .to_string() }) .collect() } /// All unvault addresses as strings up to the gap limit (100) pub fn all_unvault_addresses(&mut self) -> Vec<String> { let raw_index: u32 = self.current_unused_index.into(); (0..raw_index + self.gap_limit()) .map(|raw_index| { // FIXME: this should fail instead of creating a hardened index self.unvault_address(ChildNumber::from(raw_index)) .to_string() }) .collect() } pub fn derived_deposit_descriptor(&self, index: ChildNumber) -> DerivedDepositDescriptor { self.deposit_descriptor.derive(index, &self.secp_ctx) } pub fn derived_unvault_descriptor(&self, index: ChildNumber) -> DerivedUnvaultDescriptor { self.unvault_descriptor.derive(index, &self.secp_ctx) } pub fn derived_cpfp_descriptor(&self, index: ChildNumber) -> DerivedCpfpDescriptor { self.cpfp_descriptor.derive(index, &self.secp_ctx) } pub fn stakeholders_xpubs(&self) -> Vec<DescriptorPublicKey> { self.deposit_descriptor.xpubs() } pub fn managers_xpubs(&self) -> Vec<DescriptorPublicKey> { // The managers' xpubs are all the xpubs from the Unvault descriptor except the // Stakehodlers' ones and the Cosigning Servers' ones. 
let stk_xpubs = self.stakeholders_xpubs(); self.unvault_descriptor .xpubs() .into_iter() .filter_map(|xpub| { match xpub { DescriptorPublicKey::SinglePub(_) => None, // Cosig DescriptorPublicKey::XPub(_) => { if stk_xpubs.contains(&xpub) { None // Stakeholder } else { Some(xpub) // Manager } } } }) .collect() } pub fn stakeholders_xpubs_at(&self, index: ChildNumber) -> Vec<BitcoinPublicKey> { self.deposit_descriptor .xpubs() .into_iter() .map(|desc_xpub| { desc_xpub .derive(index.into()) .derive_public_key(&self.secp_ctx) .expect("Is derived, and there is never any hardened path") }) .collect() } pub fn our_stk_xpub_at(&self, index: ChildNumber) -> Option<BitcoinPublicKey> { self.our_stk_xpub.map(|xpub| { xpub.derive_pub(&self.secp_ctx, &[index]) .expect("The derivation index stored in the database is sane (unhardened)") .public_key }) } pub fn managers_threshold(&self) -> usize { self.unvault_descriptor .managers_threshold() .unwrap_or(self.managers_xpubs().len()) } } #[cfg(test)] mod tests { use super::RevaultD; use common::config::Config; use std::path::PathBuf; #[test] fn test_from_config() { let mut path = PathBuf::from(file!()).parent().unwrap().to_path_buf(); path.push("../../test_data/invalid_config.toml"); Config::from_file(Some(path.clone())).expect_err("Parsing invalid config file"); path.pop(); path.push("valid_config.toml"); let config = Config::from_file(Some(path)).expect("Parsing valid config file"); RevaultD::from_config(config).expect("Creating state from config"); // TODO: test actual fields.. } }
deposit_address
identifier_name
revaultd.rs
use common::config::{config_folder_path, BitcoindConfig, Config, ConfigError}; use std::{ collections::HashMap, convert::TryFrom, fmt, fs, io::{self, Read, Write}, net::SocketAddr, path::PathBuf, str::FromStr, time, vec::Vec, }; use revault_net::{ noise::{PublicKey as NoisePubKey, SecretKey as NoisePrivKey}, sodiumoxide::{self, crypto::scalarmult::curve25519}, }; use revault_tx::{ bitcoin::{ secp256k1, util::bip32::{ChildNumber, ExtendedPubKey}, Address, BlockHash, PublicKey as BitcoinPublicKey, Script, TxOut, }, miniscript::descriptor::{DescriptorPublicKey, DescriptorTrait}, scripts::{ CpfpDescriptor, DepositDescriptor, DerivedCpfpDescriptor, DerivedDepositDescriptor, DerivedUnvaultDescriptor, EmergencyAddress, UnvaultDescriptor, }, transactions::{ CancelTransaction, DepositTransaction, EmergencyTransaction, UnvaultEmergencyTransaction, UnvaultTransaction, }, }; /// The status of a [Vault], depends both on the block chain and the set of pre-signed /// transactions #[derive(Debug, Clone, Copy, PartialEq)] pub enum VaultStatus { /// The deposit transaction has less than 6 confirmations Unconfirmed, /// The deposit transaction has more than 6 confirmations Funded, /// The revocation transactions are signed by us Securing, /// The revocation transactions are signed by everyone Secured, /// The unvault transaction is signed (implies that the second emergency and the /// cancel transaction are signed). Activating, /// The unvault transaction is signed (implies that the second emergency and the /// cancel transaction are signed). 
Active, /// The unvault transaction has been broadcast Unvaulting, /// The unvault transaction is confirmed Unvaulted, /// The cancel transaction has been broadcast Canceling, /// The cancel transaction is confirmed Canceled, /// The first emergency transactions has been broadcast EmergencyVaulting, /// The first emergency transactions is confirmed EmergencyVaulted, /// The unvault emergency transactions has been broadcast UnvaultEmergencyVaulting, /// The unvault emergency transactions is confirmed UnvaultEmergencyVaulted, /// The spend transaction has been broadcast Spending, // TODO: At what depth do we forget it ? /// The spend transaction is confirmed Spent, } impl TryFrom<u32> for VaultStatus { type Error = (); fn try_from(n: u32) -> Result<Self, Self::Error> { match n { 0 => Ok(Self::Unconfirmed), 1 => Ok(Self::Funded), 2 => Ok(Self::Securing), 3 => Ok(Self::Secured), 4 => Ok(Self::Activating), 5 => Ok(Self::Active), 6 => Ok(Self::Unvaulting), 7 => Ok(Self::Unvaulted), 8 => Ok(Self::Canceling), 9 => Ok(Self::Canceled), 10 => Ok(Self::EmergencyVaulting), 11 => Ok(Self::EmergencyVaulted), 12 => Ok(Self::UnvaultEmergencyVaulting), 13 => Ok(Self::UnvaultEmergencyVaulted), 14 => Ok(Self::Spending), 15 => Ok(Self::Spent), _ => Err(()), } } } impl FromStr for VaultStatus { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "unconfirmed" => Ok(Self::Unconfirmed), "funded" => Ok(Self::Funded), "securing" => Ok(Self::Securing), "secured" => Ok(Self::Secured), "activating" => Ok(Self::Activating), "active" => Ok(Self::Active), "unvaulting" => Ok(Self::Unvaulting), "unvaulted" => Ok(Self::Unvaulted), "canceling" => Ok(Self::Canceling), "canceled" => Ok(Self::Canceled), "emergencyvaulting" => Ok(Self::EmergencyVaulting), "emergencyvaulted" => Ok(Self::EmergencyVaulted), "unvaultemergencyvaulting" => Ok(Self::UnvaultEmergencyVaulting), "unvaultemergencyvaulted" => Ok(Self::UnvaultEmergencyVaulted), "spending" => Ok(Self::Spending), "spent" => 
Ok(Self::Spent), _ => Err(()), } } } impl fmt::Display for VaultStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", match *self { Self::Unconfirmed => "unconfirmed", Self::Funded => "funded", Self::Securing => "securing", Self::Secured => "secured", Self::Activating => "activating", Self::Active => "active", Self::Unvaulting => "unvaulting", Self::Unvaulted => "unvaulted", Self::Canceling => "canceling", Self::Canceled => "canceled", Self::EmergencyVaulting => "emergencyvaulting", Self::EmergencyVaulted => "emergencyvaulted", Self::UnvaultEmergencyVaulting => "unvaultemergencyvaulting", Self::UnvaultEmergencyVaulted => "unvaultemergencyvaulted", Self::Spending => "spending", Self::Spent => "spent", } ) } } // An error related to the initialization of communication keys #[derive(Debug)] enum KeyError { ReadingKey(io::Error), WritingKey(io::Error), } impl fmt::Display for KeyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::ReadingKey(e) => write!(f, "Error reading Noise key: '{}'", e), Self::WritingKey(e) => write!(f, "Error writing Noise key: '{}'", e), } } } impl std::error::Error for KeyError {} // The communication keys are (for now) hot, so we just create it ourselves on first run. fn read_or_create_noise_key(secret_file: PathBuf) -> Result<NoisePrivKey, KeyError> { let mut noise_secret = NoisePrivKey([0; 32]); if !secret_file.as_path().exists() { log::info!( "No Noise private key at '{:?}', generating a new one", secret_file ); noise_secret = sodiumoxide::crypto::box_::gen_keypair().1; // We create it in read-only but open it in write only. 
let mut options = fs::OpenOptions::new(); options = options.write(true).create_new(true).clone(); // FIXME: handle Windows ACLs #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; options = options.mode(0o400).clone(); } let mut fd = options.open(secret_file).map_err(KeyError::WritingKey)?; fd.write_all(&noise_secret.as_ref()) .map_err(KeyError::WritingKey)?; } else { let mut noise_secret_fd = fs::File::open(secret_file).map_err(KeyError::ReadingKey)?; noise_secret_fd .read_exact(&mut noise_secret.0) .map_err(KeyError::ReadingKey)?; } // TODO: have a decent memory management and mlock() the key assert!(noise_secret.0 != [0; 32]); Ok(noise_secret) } /// A vault is defined as a confirmed utxo paying to the Vault Descriptor for which /// we have a set of pre-signed transaction (emergency, cancel, unvault). /// Depending on its status we may not yet be in possession of part -or the entirety- /// of the pre-signed transactions. /// Likewise, depending on our role (manager or stakeholder), we may not have the /// emergency transactions. pub struct _Vault { pub deposit_txo: TxOut, pub status: VaultStatus, pub vault_tx: Option<DepositTransaction>, pub emergency_tx: Option<EmergencyTransaction>, pub unvault_tx: Option<UnvaultTransaction>, pub cancel_tx: Option<CancelTransaction>, pub unvault_emergency_tx: Option<UnvaultEmergencyTransaction>, } #[derive(Debug, PartialEq, Copy, Clone)] pub struct BlockchainTip { pub height: u32, pub hash: BlockHash, } /// Our global state pub struct RevaultD { // Bitcoind stuff /// Everything we need to know to talk to bitcoind pub bitcoind_config: BitcoindConfig, /// Last block we heard about pub tip: Option<BlockchainTip>, /// Minimum confirmations before considering a deposit as mature pub min_conf: u32, // Scripts stuff /// Who am i, and where am i in all this mess ? 
pub our_stk_xpub: Option<ExtendedPubKey>, pub our_man_xpub: Option<ExtendedPubKey>, /// The miniscript descriptor of vault's outputs scripts pub deposit_descriptor: DepositDescriptor, /// The miniscript descriptor of unvault's outputs scripts pub unvault_descriptor: UnvaultDescriptor, /// The miniscript descriptor of CPFP output scripts (in unvault and spend transaction) pub cpfp_descriptor: CpfpDescriptor, /// The Emergency address, only available if we are a stakeholder pub emergency_address: Option<EmergencyAddress>, /// We don't make an enormous deal of address reuse (we cancel to the same keys), /// however we at least try to generate new addresses once they're used. // FIXME: think more about desync reconciliation.. pub current_unused_index: ChildNumber, /// The secp context required by the xpub one.. We'll eventually use it to verify keys. pub secp_ctx: secp256k1::Secp256k1<secp256k1::VerifyOnly>, /// The locktime to use on all created transaction. Always 0 for now. pub lock_time: u32, // Network stuff /// The static private key we use to establish connections to servers. We reuse it, but Trevor /// said it's fine! https://github.com/noiseprotocol/noise_spec/blob/master/noise.md#14-security-considerations pub noise_secret: NoisePrivKey, /// The ip:port the coordinator is listening on. TODO: Tor pub coordinator_host: SocketAddr, /// The static public key to enact the Noise channel with the Coordinator pub coordinator_noisekey: NoisePubKey, pub coordinator_poll_interval: time::Duration, /// The ip:port (TODO: Tor) and Noise public key of each cosigning server, only set if we are /// a manager. pub cosigs: Option<Vec<(SocketAddr, NoisePubKey)>>, // 'Wallet' stuff /// A map from a scriptPubKey to a derivation index. Used to retrieve the actual public /// keys used to generate a script from bitcoind until we can pass it xpub-expressed /// Miniscript descriptors. 
pub derivation_index_map: HashMap<Script, ChildNumber>, /// The id of the wallet used in the db pub wallet_id: Option<u32>, // Misc stuff /// We store all our data in one place, that's here. pub data_dir: PathBuf, /// Should we run as a daemon? (Default: yes) pub daemon: bool, // TODO: servers connection stuff } fn create_datadir(datadir_path: &PathBuf) -> Result<(), std::io::Error> { #[cfg(unix)] return { use fs::DirBuilder; use std::os::unix::fs::DirBuilderExt; let mut builder = DirBuilder::new(); builder.mode(0o700).recursive(true).create(datadir_path) }; #[cfg(not(unix))] return { // FIXME: make Windows secure (again?) fs::create_dir_all(datadir_path) }; } impl RevaultD { /// Creates our global state by consuming the static configuration pub fn from_config(config: Config) -> Result<RevaultD, Box<dyn std::error::Error>> { let our_man_xpub = config.manager_config.as_ref().map(|x| x.xpub); let our_stk_xpub = config.stakeholder_config.as_ref().map(|x| x.xpub); // Config should have checked that! assert!(our_man_xpub.is_some() || our_stk_xpub.is_some()); let deposit_descriptor = config.scripts_config.deposit_descriptor; let unvault_descriptor = config.scripts_config.unvault_descriptor; let cpfp_descriptor = config.scripts_config.cpfp_descriptor; let emergency_address = config.stakeholder_config.map(|x| x.emergency_address); let mut data_dir = config.data_dir.unwrap_or(config_folder_path()?); data_dir.push(config.bitcoind_config.network.to_string()); if !data_dir.as_path().exists() { if let Err(e) = create_datadir(&data_dir) { return Err(Box::from(ConfigError(format!( "Could not create data dir '{:?}': {}.", data_dir,
e.to_string() )))); } } data_dir = fs::canonicalize(data_dir)?; let data_dir_str = data_dir .to_str() .expect("Impossible: the datadir path is valid unicode"); let noise_secret_file = [data_dir_str, "noise_secret"].iter().collect(); let noise_secret = read_or_create_noise_key(noise_secret_file)?; // TODO: support hidden services let coordinator_host = SocketAddr::from_str(&config.coordinator_host)?; let coordinator_noisekey = config.coordinator_noise_key; let coordinator_poll_interval = config.coordinator_poll_seconds; let cosigs = config.manager_config.map(|config| { config .cosigners .into_iter() .map(|config| (config.host, config.noise_key)) .collect() }); let daemon = !matches!(config.daemon, Some(false)); let secp_ctx = secp256k1::Secp256k1::verification_only(); Ok(RevaultD { our_stk_xpub, our_man_xpub, deposit_descriptor, unvault_descriptor, cpfp_descriptor, secp_ctx, data_dir, daemon, emergency_address, noise_secret, coordinator_host, coordinator_noisekey, coordinator_poll_interval, cosigs, lock_time: 0, min_conf: config.min_conf, bitcoind_config: config.bitcoind_config, tip: None, // Will be updated by the database current_unused_index: ChildNumber::from(0), // FIXME: we don't need SipHash for those, use a faster alternative derivation_index_map: HashMap::new(), // Will be updated soon (:tm:) wallet_id: None, }) } fn file_from_datadir(&self, file_name: &str) -> PathBuf { let data_dir_str = self .data_dir .to_str() .expect("Impossible: the datadir path is valid unicode"); [data_dir_str, file_name].iter().collect() } /// Our Noise static public key pub fn noise_pubkey(&self) -> NoisePubKey { let scalar = curve25519::Scalar(self.noise_secret.0); NoisePubKey(curve25519::scalarmult_base(&scalar).0) } pub fn vault_address(&self, child_number: ChildNumber) -> Address { self.deposit_descriptor .derive(child_number, &self.secp_ctx) .inner() .address(self.bitcoind_config.network) .expect("deposit_descriptor is a wsh") } pub fn unvault_address(&self, child_number: 
ChildNumber) -> Address { self.unvault_descriptor .derive(child_number, &self.secp_ctx) .inner() .address(self.bitcoind_config.network) .expect("unvault_descriptor is a wsh") } pub fn gap_limit(&self) -> u32 { 100 } pub fn watchonly_wallet_name(&self) -> Option<String> { self.wallet_id .map(|ref id| format!("revaultd-watchonly-wallet-{}", id)) } pub fn log_file(&self) -> PathBuf { self.file_from_datadir("log") } pub fn pid_file(&self) -> PathBuf { self.file_from_datadir("revaultd.pid") } pub fn db_file(&self) -> PathBuf { self.file_from_datadir("revaultd.sqlite3") } pub fn watchonly_wallet_file(&self) -> Option<String> { self.watchonly_wallet_name().map(|ref name| { self.file_from_datadir(name) .to_str() .expect("Valid utf-8") .to_string() }) } pub fn rpc_socket_file(&self) -> PathBuf { self.file_from_datadir("revaultd_rpc") } pub fn is_stakeholder(&self) -> bool { self.our_stk_xpub.is_some() } pub fn is_manager(&self) -> bool { self.our_man_xpub.is_some() } pub fn deposit_address(&self) -> Address { self.vault_address(self.current_unused_index) } pub fn last_deposit_address(&self) -> Address { let raw_index: u32 = self.current_unused_index.into(); // FIXME: this should fail instead of creating a hardened index self.vault_address(ChildNumber::from(raw_index + self.gap_limit())) } pub fn last_unvault_address(&self) -> Address { let raw_index: u32 = self.current_unused_index.into(); // FIXME: this should fail instead of creating a hardened index self.unvault_address(ChildNumber::from(raw_index + self.gap_limit())) } /// All deposit addresses as strings up to the gap limit (100) pub fn all_deposit_addresses(&mut self) -> Vec<String> { self.derivation_index_map .keys() .map(|s| { Address::from_script(s, self.bitcoind_config.network) .expect("Created from P2WSH address") .to_string() }) .collect() } /// All unvault addresses as strings up to the gap limit (100) pub fn all_unvault_addresses(&mut self) -> Vec<String> { let raw_index: u32 = 
self.current_unused_index.into(); (0..raw_index + self.gap_limit()) .map(|raw_index| { // FIXME: this should fail instead of creating a hardened index self.unvault_address(ChildNumber::from(raw_index)) .to_string() }) .collect() } pub fn derived_deposit_descriptor(&self, index: ChildNumber) -> DerivedDepositDescriptor { self.deposit_descriptor.derive(index, &self.secp_ctx) } pub fn derived_unvault_descriptor(&self, index: ChildNumber) -> DerivedUnvaultDescriptor { self.unvault_descriptor.derive(index, &self.secp_ctx) } pub fn derived_cpfp_descriptor(&self, index: ChildNumber) -> DerivedCpfpDescriptor { self.cpfp_descriptor.derive(index, &self.secp_ctx) } pub fn stakeholders_xpubs(&self) -> Vec<DescriptorPublicKey> { self.deposit_descriptor.xpubs() } pub fn managers_xpubs(&self) -> Vec<DescriptorPublicKey> { // The managers' xpubs are all the xpubs from the Unvault descriptor except the // Stakehodlers' ones and the Cosigning Servers' ones. let stk_xpubs = self.stakeholders_xpubs(); self.unvault_descriptor .xpubs() .into_iter() .filter_map(|xpub| { match xpub { DescriptorPublicKey::SinglePub(_) => None, // Cosig DescriptorPublicKey::XPub(_) => { if stk_xpubs.contains(&xpub) { None // Stakeholder } else { Some(xpub) // Manager } } } }) .collect() } pub fn stakeholders_xpubs_at(&self, index: ChildNumber) -> Vec<BitcoinPublicKey> { self.deposit_descriptor .xpubs() .into_iter() .map(|desc_xpub| { desc_xpub .derive(index.into()) .derive_public_key(&self.secp_ctx) .expect("Is derived, and there is never any hardened path") }) .collect() } pub fn our_stk_xpub_at(&self, index: ChildNumber) -> Option<BitcoinPublicKey> { self.our_stk_xpub.map(|xpub| { xpub.derive_pub(&self.secp_ctx, &[index]) .expect("The derivation index stored in the database is sane (unhardened)") .public_key }) } pub fn managers_threshold(&self) -> usize { self.unvault_descriptor .managers_threshold() .unwrap_or(self.managers_xpubs().len()) } } #[cfg(test)] mod tests { use super::RevaultD; use 
common::config::Config; use std::path::PathBuf; #[test] fn test_from_config() { let mut path = PathBuf::from(file!()).parent().unwrap().to_path_buf(); path.push("../../test_data/invalid_config.toml"); Config::from_file(Some(path.clone())).expect_err("Parsing invalid config file"); path.pop(); path.push("valid_config.toml"); let config = Config::from_file(Some(path)).expect("Parsing valid config file"); RevaultD::from_config(config).expect("Creating state from config"); // TODO: test actual fields.. } }
random_line_split
revaultd.rs
use common::config::{config_folder_path, BitcoindConfig, Config, ConfigError}; use std::{ collections::HashMap, convert::TryFrom, fmt, fs, io::{self, Read, Write}, net::SocketAddr, path::PathBuf, str::FromStr, time, vec::Vec, }; use revault_net::{ noise::{PublicKey as NoisePubKey, SecretKey as NoisePrivKey}, sodiumoxide::{self, crypto::scalarmult::curve25519}, }; use revault_tx::{ bitcoin::{ secp256k1, util::bip32::{ChildNumber, ExtendedPubKey}, Address, BlockHash, PublicKey as BitcoinPublicKey, Script, TxOut, }, miniscript::descriptor::{DescriptorPublicKey, DescriptorTrait}, scripts::{ CpfpDescriptor, DepositDescriptor, DerivedCpfpDescriptor, DerivedDepositDescriptor, DerivedUnvaultDescriptor, EmergencyAddress, UnvaultDescriptor, }, transactions::{ CancelTransaction, DepositTransaction, EmergencyTransaction, UnvaultEmergencyTransaction, UnvaultTransaction, }, }; /// The status of a [Vault], depends both on the block chain and the set of pre-signed /// transactions #[derive(Debug, Clone, Copy, PartialEq)] pub enum VaultStatus { /// The deposit transaction has less than 6 confirmations Unconfirmed, /// The deposit transaction has more than 6 confirmations Funded, /// The revocation transactions are signed by us Securing, /// The revocation transactions are signed by everyone Secured, /// The unvault transaction is signed (implies that the second emergency and the /// cancel transaction are signed). Activating, /// The unvault transaction is signed (implies that the second emergency and the /// cancel transaction are signed). 
Active, /// The unvault transaction has been broadcast Unvaulting, /// The unvault transaction is confirmed Unvaulted, /// The cancel transaction has been broadcast Canceling, /// The cancel transaction is confirmed Canceled, /// The first emergency transactions has been broadcast EmergencyVaulting, /// The first emergency transactions is confirmed EmergencyVaulted, /// The unvault emergency transactions has been broadcast UnvaultEmergencyVaulting, /// The unvault emergency transactions is confirmed UnvaultEmergencyVaulted, /// The spend transaction has been broadcast Spending, // TODO: At what depth do we forget it ? /// The spend transaction is confirmed Spent, } impl TryFrom<u32> for VaultStatus { type Error = (); fn try_from(n: u32) -> Result<Self, Self::Error> { match n { 0 => Ok(Self::Unconfirmed), 1 => Ok(Self::Funded), 2 => Ok(Self::Securing), 3 => Ok(Self::Secured), 4 => Ok(Self::Activating), 5 => Ok(Self::Active), 6 => Ok(Self::Unvaulting), 7 => Ok(Self::Unvaulted), 8 => Ok(Self::Canceling), 9 => Ok(Self::Canceled), 10 => Ok(Self::EmergencyVaulting), 11 => Ok(Self::EmergencyVaulted), 12 => Ok(Self::UnvaultEmergencyVaulting), 13 => Ok(Self::UnvaultEmergencyVaulted), 14 => Ok(Self::Spending), 15 => Ok(Self::Spent), _ => Err(()), } } } impl FromStr for VaultStatus { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "unconfirmed" => Ok(Self::Unconfirmed), "funded" => Ok(Self::Funded), "securing" => Ok(Self::Securing), "secured" => Ok(Self::Secured), "activating" => Ok(Self::Activating), "active" => Ok(Self::Active), "unvaulting" => Ok(Self::Unvaulting), "unvaulted" => Ok(Self::Unvaulted), "canceling" => Ok(Self::Canceling), "canceled" => Ok(Self::Canceled), "emergencyvaulting" => Ok(Self::EmergencyVaulting), "emergencyvaulted" => Ok(Self::EmergencyVaulted), "unvaultemergencyvaulting" => Ok(Self::UnvaultEmergencyVaulting), "unvaultemergencyvaulted" => Ok(Self::UnvaultEmergencyVaulted), "spending" => Ok(Self::Spending), "spent" => 
Ok(Self::Spent), _ => Err(()), } } } impl fmt::Display for VaultStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", match *self { Self::Unconfirmed => "unconfirmed", Self::Funded => "funded", Self::Securing => "securing", Self::Secured => "secured", Self::Activating => "activating", Self::Active => "active", Self::Unvaulting => "unvaulting", Self::Unvaulted => "unvaulted", Self::Canceling => "canceling", Self::Canceled => "canceled", Self::EmergencyVaulting => "emergencyvaulting", Self::EmergencyVaulted => "emergencyvaulted", Self::UnvaultEmergencyVaulting => "unvaultemergencyvaulting", Self::UnvaultEmergencyVaulted => "unvaultemergencyvaulted", Self::Spending => "spending", Self::Spent => "spent", } ) } } // An error related to the initialization of communication keys #[derive(Debug)] enum KeyError { ReadingKey(io::Error), WritingKey(io::Error), } impl fmt::Display for KeyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::ReadingKey(e) => write!(f, "Error reading Noise key: '{}'", e), Self::WritingKey(e) => write!(f, "Error writing Noise key: '{}'", e), } } } impl std::error::Error for KeyError {} // The communication keys are (for now) hot, so we just create it ourselves on first run. fn read_or_create_noise_key(secret_file: PathBuf) -> Result<NoisePrivKey, KeyError> { let mut noise_secret = NoisePrivKey([0; 32]); if !secret_file.as_path().exists() { log::info!( "No Noise private key at '{:?}', generating a new one", secret_file ); noise_secret = sodiumoxide::crypto::box_::gen_keypair().1; // We create it in read-only but open it in write only. 
let mut options = fs::OpenOptions::new(); options = options.write(true).create_new(true).clone(); // FIXME: handle Windows ACLs #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; options = options.mode(0o400).clone(); } let mut fd = options.open(secret_file).map_err(KeyError::WritingKey)?; fd.write_all(&noise_secret.as_ref()) .map_err(KeyError::WritingKey)?; } else { let mut noise_secret_fd = fs::File::open(secret_file).map_err(KeyError::ReadingKey)?; noise_secret_fd .read_exact(&mut noise_secret.0) .map_err(KeyError::ReadingKey)?; } // TODO: have a decent memory management and mlock() the key assert!(noise_secret.0 != [0; 32]); Ok(noise_secret) } /// A vault is defined as a confirmed utxo paying to the Vault Descriptor for which /// we have a set of pre-signed transaction (emergency, cancel, unvault). /// Depending on its status we may not yet be in possession of part -or the entirety- /// of the pre-signed transactions. /// Likewise, depending on our role (manager or stakeholder), we may not have the /// emergency transactions. pub struct _Vault { pub deposit_txo: TxOut, pub status: VaultStatus, pub vault_tx: Option<DepositTransaction>, pub emergency_tx: Option<EmergencyTransaction>, pub unvault_tx: Option<UnvaultTransaction>, pub cancel_tx: Option<CancelTransaction>, pub unvault_emergency_tx: Option<UnvaultEmergencyTransaction>, } #[derive(Debug, PartialEq, Copy, Clone)] pub struct BlockchainTip { pub height: u32, pub hash: BlockHash, } /// Our global state pub struct RevaultD { // Bitcoind stuff /// Everything we need to know to talk to bitcoind pub bitcoind_config: BitcoindConfig, /// Last block we heard about pub tip: Option<BlockchainTip>, /// Minimum confirmations before considering a deposit as mature pub min_conf: u32, // Scripts stuff /// Who am i, and where am i in all this mess ? 
pub our_stk_xpub: Option<ExtendedPubKey>, pub our_man_xpub: Option<ExtendedPubKey>, /// The miniscript descriptor of vault's outputs scripts pub deposit_descriptor: DepositDescriptor, /// The miniscript descriptor of unvault's outputs scripts pub unvault_descriptor: UnvaultDescriptor, /// The miniscript descriptor of CPFP output scripts (in unvault and spend transaction) pub cpfp_descriptor: CpfpDescriptor, /// The Emergency address, only available if we are a stakeholder pub emergency_address: Option<EmergencyAddress>, /// We don't make an enormous deal of address reuse (we cancel to the same keys), /// however we at least try to generate new addresses once they're used. // FIXME: think more about desync reconciliation.. pub current_unused_index: ChildNumber, /// The secp context required by the xpub one.. We'll eventually use it to verify keys. pub secp_ctx: secp256k1::Secp256k1<secp256k1::VerifyOnly>, /// The locktime to use on all created transaction. Always 0 for now. pub lock_time: u32, // Network stuff /// The static private key we use to establish connections to servers. We reuse it, but Trevor /// said it's fine! https://github.com/noiseprotocol/noise_spec/blob/master/noise.md#14-security-considerations pub noise_secret: NoisePrivKey, /// The ip:port the coordinator is listening on. TODO: Tor pub coordinator_host: SocketAddr, /// The static public key to enact the Noise channel with the Coordinator pub coordinator_noisekey: NoisePubKey, pub coordinator_poll_interval: time::Duration, /// The ip:port (TODO: Tor) and Noise public key of each cosigning server, only set if we are /// a manager. pub cosigs: Option<Vec<(SocketAddr, NoisePubKey)>>, // 'Wallet' stuff /// A map from a scriptPubKey to a derivation index. Used to retrieve the actual public /// keys used to generate a script from bitcoind until we can pass it xpub-expressed /// Miniscript descriptors. 
pub derivation_index_map: HashMap<Script, ChildNumber>, /// The id of the wallet used in the db pub wallet_id: Option<u32>, // Misc stuff /// We store all our data in one place, that's here. pub data_dir: PathBuf, /// Should we run as a daemon? (Default: yes) pub daemon: bool, // TODO: servers connection stuff } fn create_datadir(datadir_path: &PathBuf) -> Result<(), std::io::Error> { #[cfg(unix)] return { use fs::DirBuilder; use std::os::unix::fs::DirBuilderExt; let mut builder = DirBuilder::new(); builder.mode(0o700).recursive(true).create(datadir_path) }; #[cfg(not(unix))] return { // FIXME: make Windows secure (again?) fs::create_dir_all(datadir_path) }; } impl RevaultD { /// Creates our global state by consuming the static configuration pub fn from_config(config: Config) -> Result<RevaultD, Box<dyn std::error::Error>> { let our_man_xpub = config.manager_config.as_ref().map(|x| x.xpub); let our_stk_xpub = config.stakeholder_config.as_ref().map(|x| x.xpub); // Config should have checked that! 
assert!(our_man_xpub.is_some() || our_stk_xpub.is_some()); let deposit_descriptor = config.scripts_config.deposit_descriptor; let unvault_descriptor = config.scripts_config.unvault_descriptor; let cpfp_descriptor = config.scripts_config.cpfp_descriptor; let emergency_address = config.stakeholder_config.map(|x| x.emergency_address); let mut data_dir = config.data_dir.unwrap_or(config_folder_path()?); data_dir.push(config.bitcoind_config.network.to_string()); if !data_dir.as_path().exists() { if let Err(e) = create_datadir(&data_dir) { return Err(Box::from(ConfigError(format!( "Could not create data dir '{:?}': {}.", data_dir, e.to_string() )))); } } data_dir = fs::canonicalize(data_dir)?; let data_dir_str = data_dir .to_str() .expect("Impossible: the datadir path is valid unicode"); let noise_secret_file = [data_dir_str, "noise_secret"].iter().collect(); let noise_secret = read_or_create_noise_key(noise_secret_file)?; // TODO: support hidden services let coordinator_host = SocketAddr::from_str(&config.coordinator_host)?; let coordinator_noisekey = config.coordinator_noise_key; let coordinator_poll_interval = config.coordinator_poll_seconds; let cosigs = config.manager_config.map(|config| { config .cosigners .into_iter() .map(|config| (config.host, config.noise_key)) .collect() }); let daemon = !matches!(config.daemon, Some(false)); let secp_ctx = secp256k1::Secp256k1::verification_only(); Ok(RevaultD { our_stk_xpub, our_man_xpub, deposit_descriptor, unvault_descriptor, cpfp_descriptor, secp_ctx, data_dir, daemon, emergency_address, noise_secret, coordinator_host, coordinator_noisekey, coordinator_poll_interval, cosigs, lock_time: 0, min_conf: config.min_conf, bitcoind_config: config.bitcoind_config, tip: None, // Will be updated by the database current_unused_index: ChildNumber::from(0), // FIXME: we don't need SipHash for those, use a faster alternative derivation_index_map: HashMap::new(), // Will be updated soon (:tm:) wallet_id: None, }) } fn 
file_from_datadir(&self, file_name: &str) -> PathBuf
/// Our Noise static public key pub fn noise_pubkey(&self) -> NoisePubKey { let scalar = curve25519::Scalar(self.noise_secret.0); NoisePubKey(curve25519::scalarmult_base(&scalar).0) } pub fn vault_address(&self, child_number: ChildNumber) -> Address { self.deposit_descriptor .derive(child_number, &self.secp_ctx) .inner() .address(self.bitcoind_config.network) .expect("deposit_descriptor is a wsh") } pub fn unvault_address(&self, child_number: ChildNumber) -> Address { self.unvault_descriptor .derive(child_number, &self.secp_ctx) .inner() .address(self.bitcoind_config.network) .expect("unvault_descriptor is a wsh") } pub fn gap_limit(&self) -> u32 { 100 } pub fn watchonly_wallet_name(&self) -> Option<String> { self.wallet_id .map(|ref id| format!("revaultd-watchonly-wallet-{}", id)) } pub fn log_file(&self) -> PathBuf { self.file_from_datadir("log") } pub fn pid_file(&self) -> PathBuf { self.file_from_datadir("revaultd.pid") } pub fn db_file(&self) -> PathBuf { self.file_from_datadir("revaultd.sqlite3") } pub fn watchonly_wallet_file(&self) -> Option<String> { self.watchonly_wallet_name().map(|ref name| { self.file_from_datadir(name) .to_str() .expect("Valid utf-8") .to_string() }) } pub fn rpc_socket_file(&self) -> PathBuf { self.file_from_datadir("revaultd_rpc") } pub fn is_stakeholder(&self) -> bool { self.our_stk_xpub.is_some() } pub fn is_manager(&self) -> bool { self.our_man_xpub.is_some() } pub fn deposit_address(&self) -> Address { self.vault_address(self.current_unused_index) } pub fn last_deposit_address(&self) -> Address { let raw_index: u32 = self.current_unused_index.into(); // FIXME: this should fail instead of creating a hardened index self.vault_address(ChildNumber::from(raw_index + self.gap_limit())) } pub fn last_unvault_address(&self) -> Address { let raw_index: u32 = self.current_unused_index.into(); // FIXME: this should fail instead of creating a hardened index self.unvault_address(ChildNumber::from(raw_index + self.gap_limit())) } /// All 
deposit addresses as strings up to the gap limit (100) pub fn all_deposit_addresses(&mut self) -> Vec<String> { self.derivation_index_map .keys() .map(|s| { Address::from_script(s, self.bitcoind_config.network) .expect("Created from P2WSH address") .to_string() }) .collect() } /// All unvault addresses as strings up to the gap limit (100) pub fn all_unvault_addresses(&mut self) -> Vec<String> { let raw_index: u32 = self.current_unused_index.into(); (0..raw_index + self.gap_limit()) .map(|raw_index| { // FIXME: this should fail instead of creating a hardened index self.unvault_address(ChildNumber::from(raw_index)) .to_string() }) .collect() } pub fn derived_deposit_descriptor(&self, index: ChildNumber) -> DerivedDepositDescriptor { self.deposit_descriptor.derive(index, &self.secp_ctx) } pub fn derived_unvault_descriptor(&self, index: ChildNumber) -> DerivedUnvaultDescriptor { self.unvault_descriptor.derive(index, &self.secp_ctx) } pub fn derived_cpfp_descriptor(&self, index: ChildNumber) -> DerivedCpfpDescriptor { self.cpfp_descriptor.derive(index, &self.secp_ctx) } pub fn stakeholders_xpubs(&self) -> Vec<DescriptorPublicKey> { self.deposit_descriptor.xpubs() } pub fn managers_xpubs(&self) -> Vec<DescriptorPublicKey> { // The managers' xpubs are all the xpubs from the Unvault descriptor except the // Stakehodlers' ones and the Cosigning Servers' ones. 
let stk_xpubs = self.stakeholders_xpubs(); self.unvault_descriptor .xpubs() .into_iter() .filter_map(|xpub| { match xpub { DescriptorPublicKey::SinglePub(_) => None, // Cosig DescriptorPublicKey::XPub(_) => { if stk_xpubs.contains(&xpub) { None // Stakeholder } else { Some(xpub) // Manager } } } }) .collect() } pub fn stakeholders_xpubs_at(&self, index: ChildNumber) -> Vec<BitcoinPublicKey> { self.deposit_descriptor .xpubs() .into_iter() .map(|desc_xpub| { desc_xpub .derive(index.into()) .derive_public_key(&self.secp_ctx) .expect("Is derived, and there is never any hardened path") }) .collect() } pub fn our_stk_xpub_at(&self, index: ChildNumber) -> Option<BitcoinPublicKey> { self.our_stk_xpub.map(|xpub| { xpub.derive_pub(&self.secp_ctx, &[index]) .expect("The derivation index stored in the database is sane (unhardened)") .public_key }) } pub fn managers_threshold(&self) -> usize { self.unvault_descriptor .managers_threshold() .unwrap_or(self.managers_xpubs().len()) } } #[cfg(test)] mod tests { use super::RevaultD; use common::config::Config; use std::path::PathBuf; #[test] fn test_from_config() { let mut path = PathBuf::from(file!()).parent().unwrap().to_path_buf(); path.push("../../test_data/invalid_config.toml"); Config::from_file(Some(path.clone())).expect_err("Parsing invalid config file"); path.pop(); path.push("valid_config.toml"); let config = Config::from_file(Some(path)).expect("Parsing valid config file"); RevaultD::from_config(config).expect("Creating state from config"); // TODO: test actual fields.. } }
{ let data_dir_str = self .data_dir .to_str() .expect("Impossible: the datadir path is valid unicode"); [data_dir_str, file_name].iter().collect() }
identifier_body
matgen.go
// Copyright ©2017 The Gonum Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package testlapack import ( "math" "golang.org/x/exp/rand" "gonum.org/v1/gonum/blas" "gonum.org/v1/gonum/blas/blas64" "gonum.org/v1/gonum/floats" ) // Dlatm1 computes the entries of dst as specified by mode, cond and rsign. // // mode describes how dst will be computed: // |mode| == 1: dst[0] = 1 and dst[1:n] = 1/cond // |mode| == 2: dst[:n-1] = 1/cond and dst[n-1] = 1 // |mode| == 3: dst[i] = cond^{-i/(n-1)}, i=0,...,n-1 // |mode| == 4: dst[i] = 1 - i*(1-1/cond)/(n-1) // |mode| == 5: dst[i] = random number in the range (1/cond, 1) such that // their logarithms are uniformly distributed // |mode| == 6: dst[i] = random number from the distribution given by dist // If mode is negative, the order of the elements of dst will be reversed. // For other values of mode Dlatm1 will panic. // // If rsign is true and mode is not ±6, each entry of dst will be multiplied by 1 // or -1 with probability 0.5 // // dist specifies the type of distribution to be used when mode == ±6: // dist == 1: Uniform[0,1) // dist == 2: Uniform[-1,1) // dist == 3: Normal(0,1) // For other values of dist Dlatm1 will panic. // // rnd is used as a source of random numbers. 
func Dlatm1(dst []float64, mode int, cond float64, rsign bool, dist int, rnd *rand.Rand) { amode := mode if amode < 0 { amode = -amode } if amode < 1 || 6 < amode { panic("testlapack: invalid mode") } if cond < 1 { panic("testlapack: cond < 1") } if amode == 6 && (dist < 1 || 3 < dist) { panic("testlapack: invalid dist") } n := len(dst) if n == 0 { return } switch amode { case 1: dst[0] = 1 for i := 1; i < n; i++ { dst[i] = 1 / cond } case 2: for i := 0; i < n-1; i++ { dst[i] = 1 } dst[n-1] = 1 / cond case 3: dst[0] = 1 if n > 1 { alpha := math.Pow(cond, -1/float64(n-1)) for i := 1; i < n; i++ { dst[i] = math.Pow(alpha, float64(i)) } } case 4: dst[0] = 1 if n > 1 { condInv := 1 / cond alpha := (1 - condInv) / float64(n-1) for i := 1; i < n; i++ { dst[i] = float64(n-i-1)*alpha + condInv } } case 5: alpha := math.Log(1 / cond) for i := range dst { dst[i] = math.Exp(alpha * rnd.Float64()) } case 6: switch dist { case 1: for i := range dst { dst[i] = rnd.Float64() } case 2: for i := range dst { dst[i] = 2*rnd.Float64() - 1 } case 3: for i := range dst { dst[i] = rnd.NormFloat64() } } } if rsign && amode != 6 { for i, v := range dst { if rnd.Float64() < 0.5 { dst[i] = -v } } } if mode < 0 { for i := 0; i < n/2; i++ { dst[i], dst[n-i-1] = dst[n-i-1], dst[i] } } } // Dlagsy generates an n×n symmetric matrix A, by pre- and post- multiplying a // real diagonal matrix D with a random orthogonal matrix: // A = U * D * U^T. // // work must have length at least 2*n, otherwise Dlagsy will panic. // // The parameter k is unused but it must satisfy // 0 <= k <= n-1. func Dlagsy(n, k int, d []float64, a []float64, lda int, rnd *rand.Rand, work []float64) { checkMatrix(n, n, a, lda) if k < 0 || max(0, n-1) < k { panic("testlapack: invalid value of k") } if len(d) != n { panic("testlapack: bad length of d") } if len(work) < 2*n { panic("testlapack: insufficient work length") } // Initialize lower triangle of A to diagonal matrix. 
for i := 1; i < n; i++ { for j := 0; j < i; j++ { a[i*lda+j] = 0 } } for i := 0; i < n; i++ { a[i*lda+i] = d[i] } bi := blas64.Implementation() // Generate lower triangle of symmetric matrix. for i := n - 2; i >= 0; i-- { for j := 0; j < n-i; j++ { work[j] = rnd.NormFloat64() } wn := bi.Dnrm2(n-i, work[:n-i], 1) wa := math.Copysign(wn, work[0]) var tau float64 if wn != 0 { wb := work[0] + wa bi.Dscal(n-i-1, 1/wb, work[1:n-i], 1) work[0] = 1 tau = wb / wa } // Apply random reflection to A[i:n,i:n] from the left and the // right. // // Compute y := tau * A * u. bi.Dsymv(blas.Lower, n-i, tau, a[i*lda+i:], lda, work[:n-i], 1, 0, work[n:2*n-i], 1) // Compute v := y - 1/2 * tau * ( y, u ) * u. alpha := -0.5 * tau * bi.Ddot(n-i, work[n:2*n-i], 1, work[:n-i], 1) bi.Daxpy(n-i, alpha, work[:n-i], 1, work[n:2*n-i], 1) // Apply the transformation as a rank-2 update to A[i:n,i:n]. bi.Dsyr2(blas.Lower, n-i, -1, work[:n-i], 1, work[n:2*n-i], 1, a[i*lda+i:], lda) } // Store full symmetric matrix. for i := 1; i < n; i++ { for j := 0; j < i; j++ { a[j*lda+i] = a[i*lda+j] } } } // Dlagge generates a real general m×n matrix A, by pre- and post-multiplying // a real diagonal matrix D with random orthogonal matrices: // A = U*D*V. // // d must have length min(m,n), and work must have length m+n, otherwise Dlagge // will panic. // // The parameters ku and kl are unused but they must satisfy // 0 <= kl <= m-1, // 0 <= ku <= n-1. func Dlagge(m, n, kl, ku int, d []float64, a []float64, lda int, rnd *rand.Rand, work []float64) { checkMatrix(m, n, a, lda) if kl < 0 || max(0, m-1) < kl { panic("testlapack: invalid value of kl") } if ku < 0 || max(0, n-1) < ku { panic("testlapack: invalid value of ku") } if len(d) != min(m, n) { panic("testlapack: bad length of d") } if len(work) < m+n { panic("testlapack: insufficient work length") } // Initialize A to diagonal matrix. 
for i := 0; i < m; i++ { for j := 0; j < n; j++ { a[i*lda+j] = 0 } } for i := 0; i < min(m, n); i++ { a[i*lda+i] = d[i] } // Quick exit if the user wants a diagonal matrix. // if kl == 0 && ku == 0 { // return // } bi := blas64.Implementation() // Pre- and post-multiply A by random orthogonal matrices. for i := min(m, n) - 1; i >= 0; i-- { if i < m-1 { for j := 0; j < m-i; j++ { work[j] = rnd.NormFloat64() } wn := bi.Dnrm2(m-i, work[:m-i], 1) wa := math.Copysign(wn, work[0]) var tau float64 if wn != 0 { wb := work[0] + wa bi.Dscal(m-i-1, 1/wb, work[1:m-i], 1) work[0] = 1 tau = wb / wa } // Multiply A[i:m,i:n] by random reflection from the left. bi.Dgemv(blas.Trans, m-i, n-i, 1, a[i*lda+i:], lda, work[:m-i], 1, 0, work[m:m+n-i], 1) bi.Dger(m-i, n-i, -tau, work[:m-i], 1, work[m:m+n-i], 1, a[i*lda+i:], lda) } if i < n-1 { for j := 0; j < n-i; j++ { work[j] = rnd.NormFloat64() } wn := bi.Dnrm2(n-i, work[:n-i], 1) wa := math.Copysign(wn, work[0]) var tau float64 if wn != 0 { wb := work[0] + wa bi.Dscal(n-i-1, 1/wb, work[1:n-i], 1) work[0] = 1 tau = wb / wa } // Multiply A[i:m,i:n] by random reflection from the right. bi.Dgemv(blas.NoTrans, m-i, n-i, 1, a[i*lda+i:], lda, work[:n-i], 1, 0, work[n:n+m-i], 1) bi.Dger(m-i, n-i, -tau, work[n:n+m-i], 1, work[:n-i], 1, a[i*lda+i:], lda) } } // TODO(vladimir-ch): Reduce number of subdiagonals to kl and number of // superdiagonals to ku. } // dlarnv fills dst with random numbers from a uniform or normal distribution // specified by dist: // dist=1: uniform(0,1), // dist=2: uniform(-1,1), // dist=3: normal(0,1). // For other values of dist dlarnv will panic. 
func dlarnv(dst []float64, dist int, rnd *rand.Rand) {
	switch dist {
	default:
		panic("testlapack: invalid dist")
	case 1:
		for i := range dst {
			dst[i] = rnd.Float64()
		}
	case 2:
		for i := range dst {
			dst[i] = 2*rnd.Float64() - 1
		}
	case 3:
		for i := range dst {
			dst[i] = rnd.NormFloat64()
		}
	}
}

// dlattr generates an n×n triangular test matrix A with its properties uniquely
// determined by imat and uplo, and returns whether A has unit diagonal. If diag
// is blas.Unit, the diagonal elements are set so that A[k,k]=k.
//
// trans specifies whether the matrix A or its transpose will be used.
//
// If imat is greater than 10, dlattr also generates the right hand side of the
// linear system A*x=b, or A^T*x=b. Valid values of imat are 7, and all between 11
// and 19, inclusive.
//
// b must have length n, and work must have length 3*n, and dlattr will panic
// otherwise.
func dlattr(imat int, uplo blas.Uplo, trans blas.Transpose, n int, a []float64, lda int, b, work []float64, rnd *rand.Rand) (diag blas.Diag) {
	checkMatrix(n, n, a, lda)
	if len(b) != n {
		panic("testlapack: bad length of b")
	}
	if len(work) < 3*n {
		panic("testlapack: insufficient length of work")
	}
	if uplo != blas.Upper && uplo != blas.Lower {
		panic("testlapack: bad uplo")
	}
	if trans != blas.Trans && trans != blas.NoTrans {
		panic("testlapack: bad trans")
	}

	if n == 0 {
		return blas.NonUnit
	}

	// Machine-dependent thresholds used to construct the extreme cases.
	ulp := dlamchE * dlamchB
	smlnum := dlamchS
	bignum := (1 - ulp) / smlnum

	bi := blas64.Implementation()

	switch imat {
	default:
		// TODO(vladimir-ch): Implement the remaining cases.
		panic("testlapack: invalid or unimplemented imat")
	case 7:
		// Identity matrix. The diagonal is set to NaN.
		diag = blas.Unit
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				// NaN diagonal verifies that unit-diagonal solvers never
				// actually read A[i,i].
				a[i*lda+i] = math.NaN()
				for j := i + 1; j < n; j++ {
					a[i*lda+j] = 0
				}
			}
		case blas.Lower:
			for i := 0; i < n; i++ {
				for j := 0; j < i; j++ {
					a[i*lda+j] = 0
				}
				a[i*lda+i] = math.NaN()
			}
		}
	case 11:
		// Generate a triangular matrix with elements between -1 and 1,
		// give the diagonal norm 2 to make it well-conditioned, and
		// make the right hand side large so that it requires scaling.
		diag = blas.NonUnit
		switch uplo {
		case blas.Upper:
			for i := 0; i < n-1; i++ {
				dlarnv(a[i*lda+i:i*lda+n], 2, rnd)
			}
		case blas.Lower:
			for i := 1; i < n; i++ {
				dlarnv(a[i*lda:i*lda+i+1], 2, rnd)
			}
		}
		for i := 0; i < n; i++ {
			a[i*lda+i] = math.Copysign(2, a[i*lda+i])
		}
		// Set the right hand side so that the largest value is bignum.
		dlarnv(b, 2, rnd)
		imax := bi.Idamax(n, b, 1)
		bscal := bignum / math.Max(1, b[imax])
		bi.Dscal(n, bscal, b, 1)
	case 12:
		// Make the first diagonal element in the solve small to cause
		// immediate overflow when dividing by T[j,j]. The off-diagonal
		// elements are small (cnorm[j] < 1).
		diag = blas.NonUnit
		tscal := 1 / math.Max(1, float64(n-1))
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda+i:i*lda+n], 2, rnd)
				bi.Dscal(n-i-1, tscal, a[i*lda+i+1:], 1)
				a[i*lda+i] = math.Copysign(1, a[i*lda+i])
			}
			a[(n-1)*lda+n-1] *= smlnum
		case blas.Lower:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda:i*lda+i+1], 2, rnd)
				bi.Dscal(i, tscal, a[i*lda:], 1)
				a[i*lda+i] = math.Copysign(1, a[i*lda+i])
			}
			a[0] *= smlnum
		}
		dlarnv(b, 2, rnd)
	case 13:
		// Make the first diagonal element in the solve small to cause
		// immediate overflow when dividing by T[j,j]. The off-diagonal
		// elements are O(1) (cnorm[j] > 1).
		diag = blas.NonUnit
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda+i:i*lda+n], 2, rnd)
				a[i*lda+i] = math.Copysign(1, a[i*lda+i])
			}
			a[(n-1)*lda+n-1] *= smlnum
		case blas.Lower:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda:i*lda+i+1], 2, rnd)
				a[i*lda+i] = math.Copysign(1, a[i*lda+i])
			}
			a[0] *= smlnum
		}
		dlarnv(b, 2, rnd)
	case 14:
		// T is diagonal with small numbers on the diagonal to
		// make the growth factor underflow, but a small right hand side
		// chosen so that the solution does not overflow.
		diag = blas.NonUnit
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				for j := i + 1; j < n; j++ {
					a[i*lda+j] = 0
				}
				// Alternate smlnum and 1 on the diagonal in pairs
				// (the &0x2 test flips every two indices).
				if (n-1-i)&0x2 == 0 {
					a[i*lda+i] = smlnum
				} else {
					a[i*lda+i] = 1
				}
			}
		case blas.Lower:
			for i := 0; i < n; i++ {
				for j := 0; j < i; j++ {
					a[i*lda+j] = 0
				}
				if i&0x2 == 0 {
					a[i*lda+i] = smlnum
				} else {
					a[i*lda+i] = 1
				}
			}
		}
		// Set the right hand side alternately zero and small.
		switch uplo {
		case blas.Upper:
			b[0] = 0
			for i := n - 1; i > 0; i -= 2 {
				b[i] = 0
				b[i-1] = smlnum
			}
		case blas.Lower:
			for i := 0; i < n-1; i += 2 {
				b[i] = 0
				b[i+1] = smlnum
			}
			b[n-1] = 0
		}
	case 15:
		// Make the diagonal elements small to cause gradual overflow
		// when dividing by T[j,j]. To control the amount of scaling
		// needed, the matrix is bidiagonal.
		diag = blas.NonUnit
		texp := 1 / math.Max(1, float64(n-1))
		tscal := math.Pow(smlnum, texp)
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				a[i*lda+i] = tscal
				if i < n-1 {
					a[i*lda+i+1] = -1
				}
				for j := i + 2; j < n; j++ {
					a[i*lda+j] = 0
				}
			}
		case blas.Lower:
			for i := 0; i < n; i++ {
				for j := 0; j < i-1; j++ {
					a[i*lda+j] = 0
				}
				if i > 0 {
					a[i*lda+i-1] = -1
				}
				a[i*lda+i] = tscal
			}
		}
		dlarnv(b, 2, rnd)
	case 16:
		// One zero diagonal element.
		diag = blas.NonUnit
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda+i:i*lda+n], 2, rnd)
				a[i*lda+i] = math.Copysign(2, a[i*lda+i])
			}
		case blas.Lower:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda:i*lda+i+1], 2, rnd)
				a[i*lda+i] = math.Copysign(2, a[i*lda+i])
			}
		}
		// Zero out the middle diagonal element.
		iy := n / 2
		a[iy*lda+iy] = 0
		dlarnv(b, 2, rnd)
		bi.Dscal(n, 2, b, 1)
	case 17:
		// Make the offdiagonal elements large to cause overflow when
		// adding a column of T. In the non-transposed case, the matrix
		// is constructed to cause overflow when adding a column in
		// every other step.
		diag = blas.NonUnit
		tscal := (1 - ulp) / (dlamchS / ulp)
		texp := 1.0
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				for j := i; j < n; j++ {
					a[i*lda+j] = 0
				}
			}
			// Fill the first row and the diagonal in pairs of columns,
			// doubling texp each step to force growth.
			for j := n - 1; j >= 1; j -= 2 {
				a[j] = -tscal / float64(n+1)
				a[j*lda+j] = 1
				b[j] = texp * (1 - ulp)
				a[j-1] = -tscal / float64(n+1) / float64(n+2)
				a[(j-1)*lda+j-1] = 1
				b[j-1] = texp * float64(n*n+n-1)
				texp *= 2
			}
			b[0] = float64(n+1) / float64(n+2) * tscal
		case blas.Lower:
			for i := 0; i < n; i++ {
				for j := 0; j <= i; j++ {
					a[i*lda+j] = 0
				}
			}
			for j := 0; j < n-1; j += 2 {
				a[(n-1)*lda+j] = -tscal / float64(n+1)
				a[j*lda+j] = 1
				b[j] = texp * (1 - ulp)
				a[(n-1)*lda+j+1] = -tscal / float64(n+1) / float64(n+2)
				a[(j+1)*lda+j+1] = 1
				b[j+1] = texp * float64(n*n+n-1)
				texp *= 2
			}
			b[n-1] = float64(n+1) / float64(n+2) * tscal
		}
	case 18:
		// Generate a unit triangular matrix with elements between -1
		// and 1, and make the right hand side large so that it requires
		// scaling. The diagonal is set to NaN.
		diag = blas.Unit
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				a[i*lda+i] = math.NaN()
				dlarnv(a[i*lda+i+1:i*lda+n], 2, rnd)
			}
		case blas.Lower:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda:i*lda+i], 2, rnd)
				a[i*lda+i] = math.NaN()
			}
		}
		// Set the right hand side so that the largest value is bignum.
		dlarnv(b, 2, rnd)
		iy := bi.Idamax(n, b, 1)
		bnorm := math.Abs(b[iy])
		bscal := bignum / math.Max(1, bnorm)
		bi.Dscal(n, bscal, b, 1)
	case 19:
		// Generate a triangular matrix with elements between
		// bignum/(n-1) and bignum so that at least one of the column
		// norms will exceed bignum.
		// Dlatrs cannot handle this case for (typically) n>5.
		diag = blas.NonUnit
		tleft := bignum / math.Max(1, float64(n-1))
		tscal := bignum * (float64(n-1) / math.Max(1, float64(n)))
		switch uplo {
		case blas.Upper:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda+i:i*lda+n], 2, rnd)
				for j := i; j < n; j++ {
					aij := a[i*lda+j]
					a[i*lda+j] = math.Copysign(tleft, aij) + tscal*aij
				}
			}
		case blas.Lower:
			for i := 0; i < n; i++ {
				dlarnv(a[i*lda:i*lda+i+1], 2, rnd)
				for j := 0; j <= i; j++ {
					aij := a[i*lda+j]
					a[i*lda+j] = math.Copysign(tleft, aij) + tscal*aij
				}
			}
		}
		dlarnv(b, 2, rnd)
		bi.Dscal(n, 2, b, 1)
	}

	// Flip the matrix if the transpose will be used.
	if trans == blas.Trans {
		switch uplo {
		case blas.Upper:
			for j := 0; j < n/2; j++ {
				bi.Dswap(n-2*j-1, a[j*lda+j:], 1, a[(j+1)*lda+n-j-1:], -lda)
			}
		case blas.Lower:
			for j := 0; j < n/2; j++ {
				bi.Dswap(n-2*j-1, a[j*lda+j:], lda, a[(n-j-1)*lda+j+1:], -1)
			}
		}
	}

	return diag
}

// checkMatrix panics unless (m, n, lda, len(a)) describe a valid row-major
// m×n matrix stored in a with stride lda.
func checkMatrix(m, n int, a []float64, lda int) {
	if m < 0 {
		panic("testlapack: m < 0")
	}
	if n < 0 {
		panic("testlapack: n < 0")
	}
	if lda < max(1, n) {
		panic("testlapack: lda < max(1, n)")
	}
	if len(a) < (m-1)*lda+n {
		panic("testlapack: insufficient matrix slice length")
	}
}

// randomOrthogonal returns an n×n random orthogonal matrix.
// NOTE(review): the identifier below is truncated by extraction; the remainder
// of the signature appears later in this chunk.
func randomO
// NOTE(review): continuation of the randomOrthogonal signature that was split
// by extraction ("func randomOrthogonal(n int, rnd *rand.Rand) ...").
rnd *rand.Rand) blas64.General {
	// Start from the identity and accumulate n-1 Householder reflections.
	q := eye(n, n)
	x := make([]float64, n)
	v := make([]float64, n)
	for j := 0; j < n-1; j++ {
		// x represents the j-th column of a random matrix.
		for i := 0; i < j; i++ {
			x[i] = 0
		}
		for i := j; i < n; i++ {
			x[i] = rnd.NormFloat64()
		}
		// Compute v that represents the elementary reflector that
		// annihilates the subdiagonal elements of x.
		reflector(v, x, j)
		// Compute Q * H_j and store the result into Q.
		applyReflector(q, q, v)
	}
	return q
}

// reflector generates a Householder reflector v that zeros out subdiagonal
// entries in the j-th column of a matrix. v is normalized to unit Euclidean
// norm before returning; it is left as the zero vector when j is the last
// column or when col[j:] is entirely zero.
func reflector(v, col []float64, j int) {
	n := len(col)
	if len(v) != n {
		panic("slice length mismatch")
	}
	if j < 0 || n <= j {
		panic("invalid column index")
	}

	for i := range v {
		v[i] = 0
	}
	if j == n-1 {
		return
	}
	s := floats.Norm(col[j:], 2)
	if s == 0 {
		return
	}
	// Choose the sign that avoids cancellation in col[j] + s.
	v[j] = col[j] + math.Copysign(s, col[j])
	copy(v[j+1:], col[j+1:])
	s = floats.Norm(v[j:], 2)
	floats.Scale(1/s, v[j:])
}

// applyReflector computes Q*H where H is a Householder matrix represented by
// the Householder reflector v.
//
// NOTE(review): the final scaling by 1/norm2 multiplies the whole matrix, so
// the result equals Q*H only when v has unit norm (as produced by reflector);
// if v is the zero vector, norm2 is 0 and the scale factor is +Inf — confirm
// callers never pass a zero reflector.
func applyReflector(qh blas64.General, q blas64.General, v []float64) {
	n := len(v)
	if qh.Rows != n || qh.Cols != n {
		panic("bad size of qh")
	}
	if q.Rows != n || q.Cols != n {
		panic("bad size of q")
	}
	// qv := Q * v.
	qv := make([]float64, n)
	blas64.Gemv(blas.NoTrans, 1, q, blas64.Vector{Data: v, Inc: 1}, 0, blas64.Vector{Data: qv, Inc: 1})
	// qh := Q.
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			qh.Data[i*qh.Stride+j] = q.Data[i*q.Stride+j]
		}
	}
	// qh -= 2 * qv * v^T (rank-1 update forming the reflection).
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			qh.Data[i*qh.Stride+j] -= 2 * qv[i] * v[j]
		}
	}
	// Normalize by |v|^2 (a no-op for the unit-norm v built by reflector).
	var norm2 float64
	for _, vi := range v {
		norm2 += vi * vi
	}
	norm2inv := 1 / norm2
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			qh.Data[i*qh.Stride+j] *= norm2inv
		}
	}
}
rthogonal(n int,
identifier_name