"辔":"轡", "辕":"轅", "辖":"轄",
"辗":"輾", "辘":"轆", "辙":"轍", "辚":"轔", "辞":"辭", "辩":"辯",
"辫":"辮", "边":"邊", "辺":"邊", "辽":"遼", "达":"達", "迁":"遷",
"过":"過", "迈":"邁", "运":"運", "这":"這", "进":"進", "远":"遠",
"违":"違", "连":"連", "迟":"遲", "迩":"邇", "迳":"逕", "选":"選",
"逊":"遜", "递":"遞", "逦":"邐", "逻":"邏", "遗":"遺", "邓":"鄧",
"邝":"鄺", "邬":"鄔", "邮":"郵", "邹":"鄒", "邺":"鄴", "邻":"鄰",
"郏":"郟", "郐":"鄶", "郑":"鄭", "郓":"鄆", "郦":"酈", "郧":"鄖",
"郸":"鄲", "酱":"醬", "酽":"釅", "酾":"釃", "酿":"釀", "释":"釋",
"鉴":"鑒", "銮":"鑾", "錾":"鏨", "钅":"釒", "钆":"釓", "钇":"釔",
"针":"針", "钉":"釘", "钊":"釗", "钋":"釙", "钌":"釕", "钍":"釷",
"钎":"釺", "钏":"釧", "钐":"釤", "钒":"釩", "钓":"釣", "钔":"鍆",
"钕":"釹", "钗":"釵", "钙":"鈣", "钚":"鈈", "钛":"鈦", "钜":"鉅",
"钝":"鈍", "钞":"鈔", "钠":"鈉", "钡":"鋇", "钢":"鋼", "钣":"鈑",
"钤":"鈐", "钥":"鑰", "钦":"欽", "钧":"鈞", "钨":"鎢", "钩":"鈎",
"钪":"鈧", "钫":"鈁", "钬":"鈥", "钭":"鈄", "钮":"鈕", "钯":"鈀",
"钰":"鈺", "钱":"錢", "钲":"鉦", "钳":"鉗", "钴":"鈷", "钵":"鉢",
"钶":"鈳", "钷":"鉕", "钸":"鈽", "钹":"鈸", "钺":"鉞", "钻":"鑽",
"钼":"鉬", "钽":"鉭", "钾":"鉀", "钿":"鈿", "铀":"鈾", "铁":"鐵",
"铂":"鉑", "铃":"鈴", "铄":"鑠", "铅":"鉛", "铆":"鉚", "铈":"鈰",
"铉":"鉉", "铊":"鉈", "铋":"鉍", "铌":"鈮", "铍":"鈹", "铎":"鐸",
"铐":"銬", "铑":"銠", "铒":"鉺", "铕":"銪", "铖":"鋮", "铗":"鋏",
"铘":"鋣", "铙":"鐃", "铛":"鐺", "铜":"銅", "铝":"鋁", "铞":"銱",
"铟":"銦", "铠":"鎧", "铡":"鍘", "铢":"銖", "铣":"銑", "铤":"鋌",
"铥":"銩", "铧":"鏵", "铨":"銓", "铩":"鎩", "铪":"鉿", "铫":"銚",
"铬":"鉻", "铭":"銘", "铮":"錚", "铯":"銫", "铰":"鉸", "铱":"銥",
"铲":"鏟", "铳":"銃", "铴":"鐋", "铵":"銨", "银":"銀", "铷":"銣",
"铸":"鑄", "铹":"鐒", "铺":"鋪", "铼":"錸", "铽":"鋱", "链":"鏈",
"铿":"鏗", "销":"銷", "锁":"鎖", "锂":"鋰", "锃":"鋥", "锄":"鋤",
"锅":"鍋", "锆":"鋯", "锇":"鋨", "锈":"銹", "锉":"銼", "锊":"鋝",
"锋":"鋒", "锌":"鋅", "锍":"鋶", "锎":"鐦", "锏":"鐧", "锐":"鋭",
"锑":"銻", "锒":"鋃", "锓":"鋟", "锔":"鋦", "锕":"錒", "锖":"錆",
"锗":"鍺", "锘":"鍩", "错":"錯", "锚":"錨", "锛":"錛", "锝":"鍀",
"锞":"錁", "锟":"錕", "锡":"錫", "锢":"錮", "锣":"鑼", "锤":"錘",
"锥":"錐", "锦":"錦", "锨":"鍁", "锩":"錈", "锪":"鍃", "锫":"錇",
"锬":"錟", "锭":"錠", "键":"鍵", "锯":"鋸", "锰":"錳", "锱":"錙",
"锲":"鍥", "锴":"鍇", "锵":"鏘", "锶":"鍶", "锷":"鍔", "锸":"鍤",
"锹":"鍬", "锺":"鍾", "锻":"鍛", "锼":"鎪", "锾":"鍰", "锿":"鎄",
"镀":"鍍", "镁":"鎂", "镂":"鏤", "镄":"鐨", "镅":"鎇", "镆":"鏌",
"镇":"鎮", "镉":"鎘", "镊":"鑷", "镌":"鎸", "镍":"鎳", "镎":"鎿",
"镏":"鎦", "镐":"鎬", "镑":"鎊", "镒":"鎰", "镓":"鎵", "镔":"鑌",
"镖":"鏢", "镗":"鏜", "镘":"鏝", "镙":"鏍", "镛":"鏞", "镜":"鏡",
"镝":"鏑", "镞":"鏃", "镟":"鏇", "镡":"鐔", "镢":"鐝", "镣":"鐐",
"镤":"鏷", "镥":"鑥", "镦":"鐓", "镧":"鑭", "镨":"鐠", "镩":"鑹",
"镪":"鏹", "镫":"鐙", "镬":"鑊", "镭":"鐳", "镯":"鐲", "镰":"鐮",
"镱":"鐿", "镲":"鑔", "镳":"鑣", "镶":"鑲", "长":"長", "门":"門",
"闩":"閂", "闪":"閃", "闫":"閆", "闭":"閉", "问":"問", "闯":"闖",
"闰":"閏", "闱":"闈", "闲":"閑", "闳":"閎", "间":"間", "闵":"閔",
"闶":"閌", "闷":"悶", "闸":"閘", "闹":"鬧", "闺":"閨", "闻":"聞",
"闼":"闥", "闽":"閩", "闾":"閭", "阀":"閥", "阁":"閣", "阂":"閡",
"阃":"閫", "阄":"鬮", "阅":"閲", "阆":"閬", "阈":"閾", "阉":"閹",
"阊":"閶", "阋":"鬩", "阌":"閿", "阍":"閽", "阎":"閻", "阏":"閼",
"阐":"闡", "阑":"闌", "阒":"闃", "阔":"闊", "阕":"闋", "阖":"闔",
"阗":"闐", "阙":"闕", "阚":"闞", "队":"隊", "阳":"陽", "阴":"陰",
"阵":"陣", "阶":"階", "际":"際", "陆":"陸", "陇":"隴", "陈":"陳",
"陉":"陘", "陕":"陝", "陧":"隉", "陨":"隕", "险":"險", "随":"隨",
"隐":"隱", "隶":"隸", "难":"難", "雏":"雛", "雠":"讎", "雳":"靂",
"雾":"霧", "霁":"霽", "霭":"靄", "靓":"靚", "静":"靜", "靥":"靨",
"鞑":"韃", "鞒":"鞽", "鞯":"韉", "韦":"韋", "韧":"韌", "韩":"韓",
"韪":"韙", "韫":"韞", "韬":"韜", "页":"頁", "顶":"頂", "顷":"頃",
"顸":"頇", "项":"項", "顺":"順", "顼":"頊", "顽":"頑", "顾":"顧",
"顿":"頓", "颀":"頎", "颁":"頒", "颂":"頌", "颃":"頏", "预":"預",
"颅":"顱", "领":"領", "颇":"頗", "颈":"頸", "颉":"頡", "颊":"頰",
"颌":"頜", "颍":"潁", "颏":"頦", "颐":"頤", "频":"頻", "颓":"頽",
"颔":"頷", "颖":"穎", "颗":"顆", "题":"題", "颚":"顎", "颛":"顓",
"颜":"顔", "额":"額", "颞":"顳", "颟":"顢", "颠":"顛", "颡":"顙",
"颢":"顥", "颤":"顫", "颥":"顬", "颦":"顰", "颧":"顴", "风":"風",
"飑":"颮", "飒":"颯", "飓":"颶", "飕":"颼", "飘":"飄", "飙":"飆",
"飚":"飈", "飞":"飛", "飨":"饗", "餍":"饜", "饣":"飠", "饥":"饑",
"饧":"餳", "饨":"飩", "饩":"餼", "饪":"飪", "饫":"飫", "饬":"飭",
"饭":"飯", "饮":"飲", "饯":"餞", "饰":"飾", "饱":"飽", "饲":"飼",
"饴":"飴", "饵":"餌", "饶":"饒", "饷":"餉", "饺":"餃", "饼":"餅",
"饽":"餑", "饿":"餓", "馀":"余", "馁":"餒", "馄":"餛", "馅":"餡",
"馆":"館", "馇":"餷", "馈":"饋", "馊":"餿", "馋":"饞", "馍":"饃",
"馏":"餾", "馐":"饈", "馑":"饉", "馒":"饅", "馓":"饊", "馔":"饌",
"馕":"饢", "马":"馬", "驭":"馭", "驮":"馱", "驯":"馴", "驰":"馳",
"驱":"驅", "驳":"駁", "驴":"驢", "驵":"駔", "驶":"駛", "驷":"駟",
"驸":"駙", "驹":"駒", "驺":"騶", "驻":"駐", "驼":"駝", "驽":"駑",
"驾":"駕", "驿":"驛", "骀":"駘", "骁":"驍", "骂":"駡", "骄":"驕",
"骅":"驊", "骆":"駱", "骇":"駭", "骈":"駢", "骊":"驪", "骋":"騁",
"验":"驗", "骏":"駿", "骐":"騏", "骑":"騎", "骒":"騍", "骓":"騅",
"骖":"驂", "骗":"騙", "骘":"騭", "骚":"騷", "骛":"騖", "骜":"驁",
"骝":"騮", "骞":"騫", "骟":"騸", "骠":"驃", "骡":"騾", "骢":"驄",
"骣":"驏", "骤":"驟", "骥":"驥", "骧":"驤", "髅":"髏", "髋":"髖",
"髌":"髕", "鬓":"鬢", "魇":"魘", "魉":"魎", "鱼":"魚", "鱿":"魷",
"鲁":"魯", "鲂":"魴", "鲅":"鮁", "鲆":"鮃", "鲇":"鮎", "鲈":"鱸",
"鲋":"鮒", "鲍":"鮑", "鲎":"鱟", "鲐":"鮐", "鲑":"鮭", "鲒":"鮚",
"鲔":"鮪", "鲕":"鮞", "鲚":"鱭", "鲛":"鮫", "鲜":"鮮", "鲞":"鮝",
"鲟":"鱘", "鲠":"鯁", "鲡":"鱺", "鲢":"鰱", "鲣":"鰹", "鲤":"鯉",
"鲥":"鰣", "鲦":"鰷", "鲧":"鯀", "鲨":"鯊", "鲩":"鯇", "鲫":"鯽",
"鲭":"鯖", "鲮":"鯪", "鲰":"鯫", "鲱":"鯡", "鲲":"鯤", "鲳":"鯧",
"鲴":"鯝", "鲵":"鯢", "鲶":"鯰", "鲷":"鯛", "鲸":"鯨", "鲺":"鯴",
"鲻":"鯔", "鲼":"鱝", "鲽":"鰈", "鳃":"鰓", "鳄":"鰐", "鳅":"鰍",
"鳆":"鰒", "鳇":"鰉", "鳊":"鯿", "鳋":"鰠", "鳌":"鰲", "鳍":"鰭",
"鳎":"鰨", "鳏":"鰥", "鳐":"鰩", "鳓":"鰳", "鳔":"鰾", "鳕":"鱈",
"鳖":"鱉", "鳗":"鰻", "鳘":"鰵", "鳙":"鱅", "鳜":"鱖", "鳝":"鱔",
"鳞":"鱗", "鳟":"鱒", "鳢":"鱧", "鸟":"鳥", "鸠":"鳩", "鸢":"鳶",
"鸣":"鳴", "鸥":"鷗", "鸦":"鴉", "鸨":"鴇", "鸩":"鴆", "鸪":"鴣",
"鸫":"鶇", "鸬":"鸕", "鸭":"鴨", "鸯":"鴦", "鸱":"鴟", "鸲":"鴝",
"鸳":"鴛", "鸵":"鴕", "鸶":"鷥", "鸷":"鷙", "鸸":"鴯", "鸹":"鴰",
"鸺":"鵂", "鸽":"鴿", "鸾":"鸞", "鸿":"鴻", "鹁":"鵓", "鹂":"鸝",
"鹃":"鵑", "鹄":"鵠", "鹅":"鵝", "鹆":"鵒", "鹇":"鷳", "鹈":"鵜",
"鹉":"鵡", "鹊":"鵲", "鹋":"鶓", "鹌":"鵪", "鹎":"鵯", "鹏":"鵬",
"鹑":"鶉", "鹕":"鶘", "鹗":"鶚", "鹘":"鶻", "鹚":"鷀", "鹛":"鶥",
"鹜":"鶩", "鹞":"鷂", "鹣":"鶼", "鹤":"鶴", "鹦":"鸚", "鹧":"鷓",
"鹨":"鷚", "鹩":"鷯", "鹪":"鷦", "鹫":"鷲", "鹬":"鷸", "鹭":"鷺",
"鹰":"鷹", "鹱":"鸌", "鹳":"鸛", "鹾":"鹺", "麦":"麥", "麸":"麩",
"麽":"么", "黉":"黌", "黩":"黷", "黪":"黲", "黾":"黽", "鼋":"黿",
"鼍":"鼉", "齐":"齊", "齑":"齏", "齿":"齒", "龀":"齔", "龃":"齟",
"龄":"齡", "龅":"齙", "龆":"齠", "龇":"齜", "龈":"齦", "龉":"齬",
"龊":"齪", "龋":"齲", "龌":"齷", "龙":"龍", "龚":"龔", "龛":"龕",
"龟":"龜"
}
trad = {
"乾":"乾干", "后":"后後", "壹":"壹壸", "夥":"夥伙", "師":"帅师",
"後":"後后", "徵":"徵征", "捨":"舍舎", "摺":"摺折", "棗":"枣栆",
"瀋":"沈渖", "獲":"荻获", "當":"当带", "總":"总怼", "膽":"胆幞",
"藉":"藉借", "車":"車车", "辦":"刅办", "適":"适逃", "遽":"遽业",
"邃":"邃还", "邊":"边辺", "鍾":"钟锺", "鏇":"镟旋", "閣":"合阁",
"隹":"隹只", "顰":"颦显", "飛":"飛飞", "麽":"麽么", "龜":"龟",
"亂":"乱", "亜":"亚", "亞":"亚", "佇":"伫", "來":"来", "侖":"仑",
"係":"系", "俠":"侠", "倀":"伥", "倆":"俩", "倉":"仓", "個":"个",
"們":"们", "倫":"伦", "偉":"伟", "側":"侧", "偵":"侦", "傖":"伧",
"傘":"伞", "備":"备", "傢":"家", "傭":"佣", "傳":"传", "傴":"伛",
"債":"债", "傷":"伤", "傾":"倾", "僂":"偻", "僅":"仅", "僉":"佥",
"僑":"侨", "僕":"仆", "僞":"伪", "僥":"侥", "僨":"偾", "價":"价",
"儀":"仪", "儂":"侬", "億":"亿", "儈":"侩", "儉":"俭", "儐":"傧",
"儔":"俦", "儕":"侪", "儘":"尽", "償":"偿", "優":"优", "儲":"储",
"儷":"俪", "儺":"傩", "儻":"傥", "儼":"俨", "兒":"儿", "兩":"两",
"凈":"净", "凍":"冻", "凖":"准", "凱":"凯", "剄":"刭", "則":"则",
"剋":"克", "剛":"刚", "剮":"剐", "剴":"剀", "創":"创", "劃":"划",
"劇":"剧", "劉":"刘", "劊":"刽", "劌":"刿", "劍":"剑", "劑":"剂",
"勁":"劲", "動":"动", "務":"务", "勛":"勋", "勝":"胜", "勞":"劳",
"勢":"势", "勱":"劢", "勵":"励", "勸":"劝", "匭":"匦", "匯":"汇",
"匱":"匮", "區":"区", "協":"协", "厙":"厍", "厠":"厕", "厭":"厌",
"厲":"厉", "厴":"厣", "參":"参", "叢":"丛", "咼":"呙", "員":"员",
"唄":"呗", "問":"问", "啓":"启", "啞":"哑", "啟":"启", "喚":"唤",
"喪":"丧", "喬":"乔", "單":"单", "喲":"哟", "嗆":"呛", "嗇":"啬",
"嗎":"吗", "嗚":"呜", "嗩":"唢", "嗶":"哔", "嘆":"叹", "嘍":"喽",
"嘔":"呕", "嘖":"啧", "嘗":"尝", "嘜":"唛", "嘩":"哗", "嘮":"唠",
"嘯":"啸", "嘰":"叽", "嘵":"哓", "嘸":"呒", "噝":"咝", "噠":"哒",
"噥":"哝", "噦":"哕", "噯":"嗳", "噲":"哙", "噴":"喷", "噸":"吨",
"噹":"带", "嚀":"咛", "嚇":"吓", "嚌":"哜", "嚕":"噜", "嚙":"啮",
"嚦":"呖", "嚨":"咙", "嚮":"向", "嚳":"喾", "嚴":"严", "嚶":"嘤",
"囀":"啭", "囁":"嗫", "囂":"嚣", "囅":"冁", "囈":"呓", "囌":"苏",
"囑":"嘱", "圇":"囵", "國":"国", "圍":"围", "園":"园", "圓":"圆",
"圖":"图", "團":"团", "埡":"垭", "執":"执", "堅":"坚", "堊":"垩",
"堖":"垴", "堝":"埚", "堯":"尧", "報":"报", "場":"场", "塊":"块",
"塋":"茔", "塏":"垲", "塒":"埘", "塗":"涂", "塢":"坞", "塤":"埙",
"塵":"尘", "塹":"堑", "墊":"垫", "墜":"坠", "墮":"堕", "墳":"坟",
"墻":"墙", "墾":"垦", "壇":"坛", "壓":"压", "壘":"垒", "壙":"圹",
"壚":"垆", "壞":"坏", "壟":"垄", "壠":"垅", "壢":"坜", "壩":"坝",
"壯":"壮", "壺":"壶", "壽":"寿", "夢":"梦", "夾":"夹", "奐":"奂",
"奩":"奁", "奪":"夺", "奬":"奖", "奮":"奋", "妝":"妆", "婁":"娄",
"婦":"妇", "婭":"娅", "媧":"娲", "媽":"妈", "嫗":"妪", "嫵":"妩",
"嫻":"娴", "嬀":"妫", "嬈":"娆", "嬋":"婵", "嬌":"娇", "嬙":"嫱",
"嬡":"嫒", "嬪":"嫔", "嬰":"婴", "嬸":"婶", "孌":"娈", "孫":"孙",
"學":"学", "孿":"孪", "寢":"寝", "實":"实", "寧":"宁", "審":"审",
"寫":"写", "寬":"宽", "寵":"宠", "寶":"宝", "將":"将", "專":"专",
"尋":"寻", "對":"对", "導":"导", "尷":"尴", "屢":"屡", "層":"层",
"屨":"屦", "屬":"属", "岡":"冈", "峴":"岘", "島":"岛", "峽":"峡",
"崍":"崃", "崗":"岗", "崢":"峥", "崬":"岽", "嵐":"岚", "嶁":"嵝",
"嶄":"崭", "嶇":"岖", "嶗":"崂", "嶠":"峤", "嶧":"峄", "嶸":"嵘",
"嶺":"岭", "嶼":"屿", "巋":"岿", "巒":"峦", "巔":"巅", "巰":"巯",
"帥":"帅", "帳":"帐", "帶":"带", "幀":"帧", "幃":"帏", "幗":"帼",
"幘":"帻", "幚":"帮", "幟":"帜", "幣":"币", "幫":"帮", "幬":"帱",
"幹":"干", "幾":"几", "庫":"库", "廈":"庆", "廟":"庙", "廠":"厂",
"廡":"庑", "廢":"废", "廣":"广", "廬":"庐", "廳":"厅", "弳":"弪",
"張":"张", "彆":"別", "彈":"弹", "彌":"弥", "彎":"弯", "彙":"汇",
"徑":"径", "從":"从", "徠":"徕", "復":"复", "徹":"彻", "悵":"怅",
"悶":"闷", "惡":"恶", "惱":"恼", "惲":"恽", "惻":"恻", "愛":"爱",
"愜":"惬", "愴":"怆", "愷":"恺", "愾":"忾", "態":"态", "慘":"惨",
"慚":"惭", "慟":"恸", "慣":"惯", "慤":"悫", "慪":"怄", "慫":"怂",
"慮":"虑", "慳":"悭", "慶":"庆", "憂":"忧", "憊":"惫", "憐":"怜",
"憑":"凭", "憒":"愦", "憚":"惮", "憤":"愤", "憫":"悯", "憮":"怃",
"憲":"宪", "憶":"忆", "懇":"恳", "應":"应", "懌":"怿", "懞":"蒙",
"懟":"怼", "懣":"懑", "懨":"恹", "懲":"惩", "懶":"懒", "懷":"怀",
"懸":"悬", "懺":"忏", "懼":"惧", "懾":"慑", "戀":"恋", "戇":"戆",
"戔":"戋", "戧":"戗", "戩":"戬", "戰":"战", "戲":"戏", "挾":"挟",
"捫":"扪", "捲":"卷", "掃":"扫", "掄":"抡", "掙":"挣", "揀":"拣",
"揚":"扬", "換":"换", "揮":"挥", "損":"损", "搗":"捣", "搶":"抢",
"摑":"掴", "摜":"掼", "摟":"搂", "摯":"挚", "摳":"抠", "摶":"抟",
"摻":"掺", "撈":"捞", "撓":"挠", "撟":"挢", "撣":"掸", "撥":"拨",
"撫":"抚", "撲":"扑", "撳":"揿", "撻":"挞", "撾":"挝", "撿":"捡",
"擁":"拥", "擄":"掳", "擇":"择", "擊":"击", "擋":"挡", "擔":"担",
"據":"据", "擠":"挤", "擬":"拟", "擯":"摈", "擰":"拧", "擱":"搁",
"擲":"掷", "擴":"扩", "擷":"撷", "擺":"摆", "擻":"擞", "擼":"撸",
"擾":"扰", "攄":"摅", "攆":"撵", "攏":"拢", "攔":"拦", "攖":"撄",
"攙":"搀", "攛":"撺", "攝":"摄", "攢":"攒", "攣":"挛", "攤":"摊",
"攪":"搅", "攬":"揽", "敗":"败", "敵":"敌", "數":"数", "斂":"敛",
"斃":"毙", "斕":"斓", "斬":"斩", "斷":"断", "時":"时", "晉":"晋",
"晝":"昼", "暈":"晕", "暉":"晖", "暢":"畅", "暫":"暂", "曄":"晔",
"曆":"历", "曇":"昙", "曉":"晓", "曖":"暧", "曠":"旷", "曬":"晒",
"書":"书", "會":"会", "朧":"胧", "東":"东", "梘":"枧", "條":"条",
"梟":"枭", "棄":"弃", "棖":"枨", "棟":"栋", "棧":"栈", "棲":"栖",
"椏":"桠", "楊":"杨", "楓":"枫", "楨":"桢", "業":"业", "極":"极",
"榪":"杩", "榮":"荣", "榿":"桤", "構":"构", "槍":"枪", "様":"样",
"槧":"椠", "槳":"桨", "樁":"桩", "樂":"乐", "樅":"枞", "樓":"楼",
"標":"标", "樞":"枢", "樣":"样", "樸":"朴", "樹":"树", "樺":"桦",
"橈":"桡", "橋":"桥", "機":"机", "橢":"椭", "檉":"柽", "檔":"档",
"檜":"桧", "檢":"检", "檣":"樯", "檯":"台", "檳":"槟", "檸":"柠",
"檻":"槛", "櫃":"柜", "櫓":"橹", "櫚":"榈", "櫛":"栉", "櫝":"椟",
"櫞":"橼", "櫟":"栎", "櫧":"槠", "櫨":"栌", "櫪":"枥", "櫬":"榇",
"櫳":"栊", "櫸":"榉", "櫻":"樱", "欄":"栏", "權":"权", "欏":"椤",
"欒":"栾", "欖":"榄", "欗":"栏", "欞":"棂", "欽":"钦", "歐":"欧",
"歟":"欤", "歡":"欢", "歲":"岁", "歳":"岁", "歷":"历", "歸":"归",
"殘":"残", "殞":"殒", "殤":"殇", "殫":"殚", "殮":"殓", "殯":"殡",
"殲":"歼", "殺":"杀", "殻":"壳", "殼":"壳", "毆":"殴", "毿":"毵",
"氈":"毡", "氌":"氇", "氣":"气", "氫":"氢", "氬":"氩", "浹":"浃",
"涇":"泾", "淪":"沦", "淵":"渊", "淶":"涞", "淺":"浅", "渙":"涣",
"渦":"涡", "測":"测", "渾":"浑", "湞":"浈", "湯":"汤", "準":"准",
"溝":"沟", "滄":"沧", "滅":"灭", "滌":"涤", "滎":"荥", "滬":"沪",
"滯":"滞", "滲":"渗", "滷":"卤", "滸":"浒", "滿":"满", "漁":"渔",
"漚":"沤", "漢":"汉", "漣":"涟", "漬":"渍", "漲":"涨", "漸":"渐",
"漿":"浆", "潁":"颍", "潑":"泼", "潔":"洁", "潙":"沩", "潛":"潜",
"潤":"润", "潯":"浔", "潰":"溃", "潷":"滗", "潿":"涠", "澀":"涩",
"澆":"浇", "澇":"涝", "澗":"涧", "澠":"渑", "澤":"泽", "澩":"泶",
"澮":"浍", "澱":"淀", "濁":"浊", "濃":"浓", "濕":"湿", "濘":"泞",
"濛":"蒙", "濟":"济", "濤":"涛", "濫":"滥", "濰":"潍", "濱":"滨",
"濺":"溅", "濼":"泺", "濾":"滤", "瀅":"滢", "瀆":"渎", "瀉":"泻",
"瀏":"浏", "瀕":"濒", "瀘":"泸", "瀝":"沥", "瀟":"潇", "瀠":"潆",
"瀧":"泷", "瀨":"濑", "瀲":"潋", "瀾":"澜", "灃":"沣", "灄":"滠",
"灑":"洒", "灕":"漓", "灘":"滩", "灝":"灏", "灣":"湾", "灤":"滦",
"灧":"滟", "為":"为", "烏":"乌", "烴":"烃", "無":"无", "煉":"炼",
"煒":"炜", "煢":"茕", "煥":"焕", "煩":"烦", "煬":"炀", "熒":"荧",
"熗":"炝", "熱":"热", "熾":"炽", "燁":"烨", "燈":"灯", "燒":"烧",
"燙":"烫", "燜":"焖", "營":"营", "燦":"灿", "燭":"烛", "燴":"烩",
"燼":"烬", "燾":"焘", "爍":"烁", "爐":"炉", "爛":"烂", "爤":"烂",
"爭":"争", "爲":"为", "爺":"爷", "爾":"尔", "牘":"牍", "牽":"牵",
"犖":"荦", "犢":"犊", "犧":"牺", "狀":"状", "狹":"狭", "狽":"狈",
"猙":"狰", "猶":"犹", "猻":"狲", "獁":"犸", "獄":"狱", "獅":"狮",
"獨":"独", "獪":"狯", "獫":"猃", "獰":"狞", "獵":"猎", "獷":"犷",
"獸":"兽", "獺":"獭", "獻":"献", "獼":"猕", "玀":"猡", "現":"现",
"琿":"珲", "瑋":"玮", "瑣":"琐", "瑩":"莹", "瑪":"玛", "璉":"琏",
"璣":"玑", "璦":"瑷", "環":"环", "璽":"玺", "瓊":"琼", "瓏":"珑",
"瓔":"璎", "瓚":"瓒", "甌":"瓯", "產":"产", "産":"产", "畆":"亩",
"畝":"亩", "畢":"毕", "畫":"画", "疇":"畴", "疊":"叠", "痙":"痉",
"瘂":"痖", "瘋":"疯", "瘍":"疡", "瘓":"痪", "瘞":"瘗", "瘡":"疮",
"瘧":"疟", "瘻":"瘘", "療":"疗", "癆":"痨", "癇":"痫", "癉":"瘅",
"癘":"疠", "癟":"瘪", "癢":"痒", "癤":"疖", "癥":"症", "癧":"疬",
"癩":"癞", "癬":"癣", "癭":"瘿", "癮":"瘾", "癰":"痈", "癱":"瘫",
"癲":"癫", "發":"发", "皚":"皑", "皸":"皲", "皺":"皱", "盞":"盏",
"盡":"尽", "監":"监", "盤":"盘", "盧":"卢", "眾":"众", "睏":"困",
"睜":"睁", "睞":"睐", "瞘":"眍", "瞞":"瞒", "瞭":"了", "瞼":"睑",
"矇":"蒙", "矚":"瞩", "矯":"矫", "硃":"朱", "硤":"硖", "硨":"砗",
"硯":"砚", "碩":"硕", "碭":"砀", "碸":"砜", "確":"确", "碼":"码",
"磚":"砖", "磣":"碜", "磧":"碛", "磯":"矶", "磽":"硗", "礎":"础",
"礙":"碍", "礦":"矿", "礪":"砺", "礫":"砾", "礬":"矾", "礱":"砻",
"祇":"只", "禍":"祸", "禎":"祯", "禦":"御", "禪":"禅", "禮":"礼",
"禰":"祢", "禱":"祷", "種":"种", "稱":"称", "穀":"谷", "穌":"稣",
"積":"积", "穎":"颖", "穡":"穑", "穢":"秽", "穩":"稳", "穫":"荻",
"窩":"窝", "窪":"洼", "窮":"穷", "窶":"窭", "窺":"窥", "竄":"窜",
"竅":"窍", "竇":"窦", "竈":"灶", "竊":"窃", "竪":"竖", "競":"竞",
"筆":"笔", "筧":"笕", "箋":"笺", "箏":"筝", "節":"节", "範":"范",
"築":"筑", "篋":"箧", "篤":"笃", "篩":"筛", "篳":"筚", "簀":"箦",
"簍":"篓", "簞":"箪", "簡":"简", "簣":"篑", "簫":"箫", "簽":"签",
"簾":"帘", "籃":"篮", "籌":"筹", "籜":"箨", "籟":"籁", "籠":"笼",
"籤":"签", "籩":"笾", "籪":"簖", "籬":"篱", "籮":"箩", "糝":"糁",
"糞":"粪", "糧":"粮", "糰":"团", "糲":"粝", "糴":"籴", "糶":"粜",
"糹":"纟", "糾":"纠", "紀":"纪", "紂":"纣", "約":"约", "紅":"红",
"紆":"纡", "紇":"纥", | |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 13:11:38 2012
@author: proto
"""
from pyparsing import Word, Suppress, Optional, alphanums, Group, ZeroOrMore
import numpy as np
import json
import itertools
import math
import re
import difflib
from copy import deepcopy, copy
from collections import defaultdict, Counter
import utils.structures as st
import detectOntology
from utils.util import logMess
from utils.util import pmemoize as memoize
'''
This file classifies rules according to the information contained in
the json config file, based on their reactants/products.
'''
@memoize
def get_close_matches(match, dataset, cutoff=0.6):
return difflib.get_close_matches(match, dataset, cutoff=cutoff)
@memoize
def sequenceMatcher(a,b):
'''
compares two strings ignoring underscores
'''
return difflib.SequenceMatcher(lambda x:x == '_',a,b).ratio()
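# Illustrative note (added; hypothetical values, not from the original file):
# with '_' treated as junk, sequenceMatcher('EGFR_P', 'EGFRP') scores about
# 0.91 (2*5 matched chars / 11 total), while unrelated names score far lower;
# the fuzzy matching below relies on exactly this behavior.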
name = Word(alphanums + '_-') + ':'
species = (Word(alphanums + "_" + ":#-")
+ Suppress('()') + Optional(Suppress('@' + Word(alphanums + '_-')))) + ZeroOrMore(Suppress('+') + Word(alphanums + "_" + ":#-")
+ Suppress("()") + Optional(Suppress('@' + Word(alphanums + '_-'))))
rate = Word(alphanums + "()")
grammar = Suppress(Optional(name)) + ((Group(species) | '0') + Suppress(Optional("<") + "->") + (Group(species) | '0') + Suppress(rate))
@memoize
def parseReactions(reaction, specialSymbols=''):
if reaction.startswith('#'):
return None
result = grammar.parseString(reaction).asList()
if len(result) < 2:
result = [result, []]
if '<->' in reaction and len(result[0]) == 1 and len(result[1]) == 2:
result.reverse()
return result
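# Illustrative sketch (added; hypothetical input, not from the original file):
# the grammar above turns a BNGL-style reaction string into a
# [reactants, products] pair, e.g.
#   parseReactions('A() + B() -> C() k1')  ->  [['A', 'B'], ['C']]
# and the '<->' branch reorders one-to-two reversible reactions so the
# two-species (binding) side comes first.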
def addToDependencyGraph(dependencyGraph, label, value):
if label not in dependencyGraph:
dependencyGraph[label] = []
if value not in dependencyGraph[label] and value != []:
dependencyGraph[label].append(value)
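# Illustrative sketch (added): addToDependencyGraph(g, 'C', ['A', 'B']) creates
# g['C'] if needed and appends ['A', 'B'] at most once; empty values are
# skipped, so repeated calls never duplicate an entry.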
class SBMLAnalyzer:
def __init__(self, modelParser, configurationFile, namingConventions, speciesEquivalences=None, conservationOfMass = True):
self.modelParser = modelParser
self.configurationFile = configurationFile
self.namingConventions = detectOntology.loadOntology(namingConventions)
self.userNamingConventions = copy(self.namingConventions)
self.speciesEquivalences = speciesEquivalences
self.userEquivalencesDict = None
self.lexicalSpecies = []
self.conservationOfMass = conservationOfMass
def distanceToModification(self, particle, modifiedElement, translationKeys):
posparticlePos = [m.start() + len(particle) for m in re.finditer(particle, modifiedElement)]
preparticlePos = [m.start() for m in re.finditer(particle, modifiedElement)]
keyPos = [m.start() for m in re.finditer(translationKeys, modifiedElement)]
distance = [abs(y-x) for x in posparticlePos for y in keyPos]
distance.extend([abs(y-x) for x in preparticlePos for y in keyPos])
distance.append(9999)
return min(distance)
def fuzzyArtificialReaction(self,baseElements,modifiedElement,molecules):
'''
in case we don't know how a species is composed but we know its base
elements, try to get it by concatenating its basic reactants
'''
import collections
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
equivalenceTranslator,translationKeys,conventionDict = self.processNamingConventions2(molecules)
indirectEquivalenceTranslator= {x:[] for x in equivalenceTranslator}
self.processFuzzyReaction([baseElements,modifiedElement],translationKeys,conventionDict,indirectEquivalenceTranslator)
newBaseElements = baseElements
for modification in indirectEquivalenceTranslator:
for element in indirectEquivalenceTranslator[modification]:
newBaseElements = [element[2][1] if x==element[2][0] else x for x in newBaseElements]
if compare(baseElements,newBaseElements):
return None
return newBaseElements
def analyzeSpeciesModification2(self, baseElement, modifiedElement, partialAnalysis):
"""
A method to read modifications within complexes.
"""
def index_min(values):
return min(xrange(len(values)), key=values.__getitem__)
equivalenceTranslator, translationKeys, conventionDict = self.processNamingConventions2([baseElement, modifiedElement])
differencePosition = [(i, x) for i, x in enumerate(difflib.ndiff(baseElement, modifiedElement)) if x.startswith('+')]
tmp = ''
lastIdx = 0
newDifferencePosition = []
for i in range(len(differencePosition)):
tmp += differencePosition[i][1][-1]
if tmp in translationKeys:
newDifferencePosition.append(((differencePosition[lastIdx][0] + differencePosition[i][0]) / 2, tmp))
tmp = ''
lastIdx = i
differencePosition = newDifferencePosition
if len(differencePosition) == 0:
return None, None, None
sortedPartialAnalysis = sorted(partialAnalysis, key=len, reverse=True)
tokenPosition = []
tmpModifiedElement = modifiedElement
for token in sortedPartialAnalysis:
sequenceMatcher = difflib.SequenceMatcher(None, token, tmpModifiedElement)
#sequenceMatcher2 = difflib.SequenceMatcher(None,token,baseElement)
modifiedMatchingBlocks = [m.span() for m in re.finditer(token, tmpModifiedElement)]
baseMatchingBlocks = [m.span() for m in re.finditer(token, baseElement)]
#matchingBlocks = [x for x in modifiedMatchingBlocks for y in baseMatching Blocks if ]
if len(modifiedMatchingBlocks) > 0 and len(baseMatchingBlocks) > 0:
#select the matching block with the lowest distance to the base matching block
matchingBlockIdx = index_min([min([abs((y[1]+y[0])/2 - (x[1]+x[0])/2) for y in baseMatchingBlocks]) for x in modifiedMatchingBlocks])
matchingBlock = modifiedMatchingBlocks[matchingBlockIdx]
tmpModifiedElement = list(tmpModifiedElement)
for idx in range(matchingBlock[0],matchingBlock[1]):
tmpModifiedElement[idx] = '_'
tmpModifiedElement = ''.join(tmpModifiedElement)
tokenPosition.append((matchingBlock[0],matchingBlock[1]-1))
else:
#try fuzzy search
sequenceMatcher = difflib.SequenceMatcher(None,token,tmpModifiedElement)
match = ''.join(tmpModifiedElement[j:j+n] for i, j, n in sequenceMatcher.get_matching_blocks() if n)
if (len(match)) / float(len(token)) < 0.8:
tokenPosition.append([999999999])
else:
tmp = [i for i, y in enumerate(difflib.ndiff(token, tmpModifiedElement)) if not y.startswith('+')]
if tmp[-1] - tmp[0] > len(token) + 5:
tokenPosition.append([999999999])
continue
tmpModifiedElement = list(tmpModifiedElement)
for idx in tmp:
if idx< len(tmpModifiedElement):
tmpModifiedElement[idx] = '_'
tmpModifiedElement = ''.join(tmpModifiedElement)
tmp = [tmp[0],tmp[-1]-1]
tokenPosition.append(tmp)
intersection = []
for difference in differencePosition:
distance = []
for token in tokenPosition:
distance.append(min([abs(difference[0] - subtoken) for subtoken in token]))
closestToken = sortedPartialAnalysis[index_min(distance)]
#if difference[1] in conventionDict:
intersection.append([difference[1],closestToken,min(distance)])
minimumToken = min(intersection,key=lambda x:x[2])
if intersection:
return minimumToken[1],translationKeys, equivalenceTranslator
return None, None, None
def analyzeSpeciesModification(self, baseElement, modifiedElement, partialAnalysis):
'''
a method for trying to read modifications within complexes
This is only possible once we know their internal structure
(this method is called after the creation and resolving of the dependency
graph)
'''
equivalenceTranslator, translationKeys, conventionDict = self.processNamingConventions2([baseElement, modifiedElement])
scores = []
if len(translationKeys) == 0:
'''
there's no clear lexical path between reactant and product
'''
return None, None, None
for particle in partialAnalysis:
distance = 9999
comparisonElement = max(baseElement, modifiedElement, key=len)
if re.search('(_|^){0}(_|$)'.format(particle), comparisonElement) == None:
distance = self.distanceToModification(particle, comparisonElement, translationKeys[0])
score = difflib.ndiff(particle, modifiedElement)
else:
# FIXME: make sure we only do a search on those variables that are viable
# candidates. this is once again fuzzy string matching. there should
# be a better way of doing this with difflib
permutations = set(['_'.join(x) for x in itertools.permutations(partialAnalysis, 2) if x[0] == particle])
if all([x not in modifiedElement for x in permutations]):
distance = self.distanceToModification(particle, comparisonElement, translationKeys[0])
score = difflib.ndiff(particle, modifiedElement)
# FIXME: this is just an ad-hoc parameter for how far a modification can be from a species name
# use something better
if distance < 4:
scores.append([particle, distance])
if len(scores) > 0:
winner = scores[[x[1] for x in scores].index(min([x[1] for x in scores]))][0]
else:
winner = None
if winner:
return winner, translationKeys, equivalenceTranslator
return None, None, None
def findMatchingModification(self, particle, species):
@memoize
def findMatchingModificationHelper(particle, species):
difference = difflib.ndiff(species,particle)
differenceList = tuple([x for x in difference if '+' in x])
if differenceList in self.namingConventions['patterns']:
return [self.namingConventions['patterns'][differenceList]]
fuzzyKey = ''.join([x[2:] for x in differenceList])
differenceList = self.testAgainstExistingConventions(fuzzyKey,self.namingConventions['modificationList'])
#can we state the modification as the combination of multiple modifications
if differenceList:
classificationList = []
for x in differenceList[0]:
differenceKey = tuple(['+ {0}'.format(letter) for letter in x])
classificationList.append(self.namingConventions['patterns'][differenceKey])
return classificationList
return None
return findMatchingModificationHelper(particle,species)
def greedyModificationMatching(self,speciesString, referenceSpecies):
'''
recursive function trying to map a given species string to a string permutation of the strings in reference species
>>> sa = SBMLAnalyzer(None,'./config/reactionDefinitions.json','./config/namingConventions.json')
>>> sorted(sa.greedyModificationMatching('EGF_EGFR',['EGF','EGFR']))
['EGF', 'EGFR']
>>> sorted(sa.greedyModificationMatching('EGF_EGFR_2_P_Grb2',['EGF','EGFR','EGF_EGFR_2_P','Grb2']))
['EGF_EGFR_2_P', 'Grb2']
>>> sorted(sa.greedyModificationMatching('A_B_C_D',['A','B','C','C_D','A_B_C','A_B']))
['A_B', 'C_D']
'''
bestMatch = ['', 0]
finalMatches = []
blacklist = []
while(len(blacklist)< len(referenceSpecies)):
localReferenceSpecies = [x for x in referenceSpecies if x not in blacklist and len(x) <= len(speciesString)]
for species in localReferenceSpecies:
if species in speciesString and len(species) > bestMatch[1] and species != speciesString:
bestMatch = [species,len(species)]
if bestMatch != ['', 0]:
result = self.greedyModificationMatching(speciesString.replace(bestMatch[0],''), referenceSpecies)
finalMatches = [bestMatch[0]]
if result == -1:
finalMatches = []
blacklist.append(bestMatch[0])
bestMatch = ['',0]
continue
elif result != -2:
finalMatches.extend(result)
break
elif len([x for x in speciesString if x != '_']) > 0:
return -1
else:
return -2
return finalMatches
def findClosestModification(self, particles, species, annotationDict, originalDependencyGraph):
'''
maps a set of particles to the complete set of species using lexical analysis. This step is done
independent of the reaction network.
'''
equivalenceTranslator = {}
dependencyGraph = {}
localSpeciesDict = defaultdict(lambda : defaultdict(list))
def analyzeByParticle(splitparticle,species,
equivalenceTranslator=equivalenceTranslator,
dependencyGraph=dependencyGraph):
basicElements = []
composingElements = []
splitpindex = -1
#for splitpindex in range(0,len(splitparticle)):
while (splitpindex + 1)< len(splitparticle):
splitpindex += 1
splitp = splitparticle[splitpindex]
if splitp in species:
closestList = [splitp]
similarList = get_close_matches(splitp,species)
similarList = [x for x in similarList if x != splitp and len(x) < len(splitp)]
similarList = [[x,splitp] for x in similarList]
if len(similarList) > 0:
for similarity in similarList:
#compare close lexical proximity
fuzzyList = self.processAdHocNamingConventions(similarity[0],
similarity[1],localSpeciesDict,False,species)
for reaction,tag,modifier in fuzzyList:
if modifier != None and all(['-' not in x for x in modifier]):
logMess('INFO:LAE001','Lexical relationship inferred between \
{0}, user information confirming it is required'.format(similarity))
else:
closestList = get_close_matches(splitp,species)
closestList = [x for x in closestList if len(x) < len(splitp)]
#if there's nothing in the species list I can find a lexical
#neighbor from, then try to create one based on my two
#positional neighbors
if closestList == []:
flag = True
#do
#
# adventure module
#
# vim: et sw=2 ts=2 sts=2
# for Python3, use:
# import urllib.request as urllib2
import urllib2
import random
import string
import textwrap
import time
# "directions" are all the ways you can describe going some way;
# they are code-visible names for directions for adventure authors
direction_names = ["NORTH","SOUTH","EAST","WEST","UP","DOWN","RIGHT","LEFT",
"IN","OUT","FORWARD","BACK",
"NORTHWEST","NORTHEAST","SOUTHWEST","SOUTHEAST"]
direction_list = [ NORTH, SOUTH, EAST, WEST, UP, DOWN, RIGHT, LEFT,
IN, OUT, FORWARD, BACK,
NORTHWEST, NORTHEAST, SOUTHWEST, SOUTHEAST] = \
range(len(direction_names))
NOT_DIRECTION = None
# some old names, for backwards compatibility
(NORTH_WEST, NORTH_EAST, SOUTH_WEST, SOUTH_EAST) = \
(NORTHWEST, NORTHEAST, SOUTHWEST, SOUTHEAST)
directions = dir_by_name = dict(zip(direction_names, direction_list))
def define_direction (number, name):
if name in dir_by_name:
exit("%s is already defined as %d" % (name, dir_by_name[name]))
dir_by_name[name] = number
def lookup_dir (name):
return dir_by_name.get(name, NOT_DIRECTION)
# add lower-case versions of all names in direction_names
for name in direction_names:
define_direction(dir_by_name[name], name.lower())
# add common aliases:
# maybe the alias mechanism should be a more general
# (text-based?) mechanism that works for any command?!!!
common_aliases = [
(NORTH, "n"),
(SOUTH, "s"),
(EAST, "e"),
(WEST, "w"),
(UP, "u"),
(DOWN, "d"),
(FORWARD, "fd"),
(FORWARD, "fwd"),
(FORWARD, "f"),
(BACK, "bk"),
(BACK, "b"),
(NORTHWEST,"nw"),
(NORTHEAST,"ne"),
(SOUTHWEST,"sw"),
(SOUTHEAST, "se")
]
for (k,v) in common_aliases:
define_direction(k,v)
# define the pairs of opposite directions
opposite_by_dir = {}
def define_opposite_dirs (d1, d2):
for dir in (d1, d2):
opposite = opposite_by_dir.get(dir)
if opposite is not None:
exit("opposite for %s is already defined as %s" % (dir, opposite))
opposite_by_dir[d1] = d2
opposite_by_dir[d2] = d1
opposites = [(NORTH, SOUTH),
(EAST, WEST),
(UP, DOWN),
(LEFT, RIGHT),
(IN, OUT),
(FORWARD, BACK),
(NORTHWEST, SOUTHEAST),
(NORTHEAST, SOUTHWEST)]
for (d1,d2) in opposites:
define_opposite_dirs(d1,d2)
def opposite_direction (dir):
return opposite_by_dir[dir]
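# Editorial sketch (added; not part of the original module): a small
# self-check showing how the direction tables above fit together. It is
# never called; run it manually if desired.
def _demo_directions():
  assert lookup_dir("nw") == NORTHWEST
  assert lookup_dir("sideways") is NOT_DIRECTION
  assert opposite_direction(IN) == OUT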
# registered games
registered_games = {}
FEEDBACK = 0
TITLE = 1
DESCRIPTION = 2
CONTENTS = 3
DEBUG = 4
class Colors:
'''
Colors class:
reset all colors with colors.reset
two subclasses fg for foreground and bg for background.
use as colors.subclass.colorname.
i.e. colors.fg.red or colors.bg.green
also, the generic bold, disable, underline, reverse, strikethrough,
and invisible work with the main class
i.e. colors.bold
'''
reset='\033[0m'
bold='\033[01m'
disable='\033[02m'
underline='\033[04m'
reverse='\033[07m'
strikethrough='\033[09m'
invisible='\033[08m'
class FG:
black='\033[30m'
red='\033[31m'
green='\033[32m'
orange='\033[33m'
blue='\033[34m'
purple='\033[35m'
cyan='\033[36m'
lightgrey='\033[37m'
darkgrey='\033[90m'
lightred='\033[91m'
lightgreen='\033[92m'
yellow='\033[93m'
lightblue='\033[94m'
pink='\033[95m'
lightcyan='\033[96m'
class BG:
black='\033[40m'
red='\033[41m'
green='\033[42m'
orange='\033[43m'
blue='\033[44m'
purple='\033[45m'
cyan='\033[46m'
lightgrey='\033[47m'
articles = ['a', 'an', 'the']
# some prepositions to recognize indirect objects in prepositional phrases
prepositions = ['aboard', 'about', 'above', 'across', 'after', 'against', 'along',
'among', 'around', 'at', 'atop', 'before', 'behind', 'below', 'beneath',
'beside', 'besides', 'between', 'beyond', 'by', 'for', 'from', 'in', 'including',
'inside', 'into', 'on', 'onto', 'outside', 'over', 'past', 'than', 'through', 'to',
'toward', 'under', 'underneath', 'upon', 'with', 'within']
# changes "lock" to "a lock", "apple" to "an apple", etc.
# note that no article should be added to proper names;
# For now we'll just assume
# anything starting with upper case is proper.
# Do not add an article to plural nouns.
def add_article (name):
# simple plural test
if len(name) > 1 and name[-1] == 's' and name[-2] != 's':
return name
# check if there is already an article on the string
if name.split()[0] in articles:
return name
consonants = "bcdfghjklmnpqrstvwxyz"
vowels = "aeiou"
if name and (name[0] in vowels):
article = "an "
elif name and (name[0] in consonants):
article = "a "
else:
article = ""
return "%s%s" % (article, name)
def normalize_input(text):
superfluous = articles + ['and']
rest = []
for word in text.split():
word = "".join(l for l in word if l not in string.punctuation)
if word not in superfluous:
rest.append(word)
return ' '.join(rest)
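# Illustrative note (added): normalize_input("take the lamp, and the key")
# strips punctuation, articles and 'and', leaving "take lamp key".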
def proper_list_from_dict(d):
names = d.keys()
buf = []
name_count = len(names)
for (i,name) in enumerate(names):
if i != 0:
buf.append(", " if name_count > 2 else " ")
if i == name_count-1 and name_count > 1:
buf.append("and ")
buf.append(add_article(name))
return "".join(buf)
# Base is a place to put default implementations of methods that everything
# in the game should support (eg save/restore, how to respond to verbs etc)
class Base(object):
def __init__(self, name):
self.game = None
self.name = name
self.verbs = {}
self.phrases = {}
self.vars = {}
def flag(self, f):
if f in self.vars:
return self.vars[f]
else:
return False
def set_flag(self, f):
self.vars[f] = True
def unset_flag(self, f):
if f in self.vars:
del self.vars[f]
def var(self, var):
if var in self.vars:
return self.vars[var]
else:
return None
def set_var(self, var, val):
self.vars[var] = val
def unset_var(self, var):
if var in self.vars:
del self.vars[var]
def add_verb(self, v):
self.verbs[' '.join(v.name.split())] = v
v.bind_to(self)
return v
def get_verb(self, verb):
c = ' '.join(verb.split())
if c in self.verbs:
return self.verbs[c]
else:
return None
def add_phrase(self, phrase, f, requirements = []):
if isinstance(f, BaseVerb):
f.bind_to(self)
self.phrases[' '.join(phrase.split())] = (f, set(requirements))
def get_phrase(self, phrase, things_present):
phrase = phrase.strip()
things_present = set(things_present)
if not phrase in self.phrases:
return None
p = self.phrases[phrase]
if things_present.issuperset(p[1]):
return p[0]
return None
def output(self, text, message_type = 0):
self.game.output(text, message_type)
class BaseVerb(Base):
def __init__(self, function, name):
Base.__init__(self, name)
self.function = function
self.bound_to = None
def bind_to(self, obj):
self.bound_to = obj
def act(self, actor, noun, words):
result = True
if not self.function(actor, noun, None):
result = False
# treat 'verb noun1 and noun2..' as 'verb noun1' then 'verb noun2'
# treat 'verb noun1, noun2...' as 'verb noun1' then 'verb noun2'
# if any of the nouns work on the verb consider the command successful,
# even if some of them don't
if words:
for noun in words:
if self.function(actor, noun, None):
result = True
return result
class Die(BaseVerb):
def __init__(self, string, name = ""):
BaseVerb.__init__(self, None, name)
self.string = string
def act(self, actor, noun, words):
self.bound_to.game.output("%s %s %s" % (actor.name.capitalize(),
actor.isare, self.string), FEEDBACK)
self.bound_to.game.output("%s %s dead." % (actor.name.capitalize(),
actor.isare), FEEDBACK)
actor.terminate()
return True
class Say(BaseVerb):
def __init__(self, string, name = ""):
BaseVerb.__init__(self, None, name)
self.string = string
def act(self, actor, noun, words):
self.bound_to.game.output(self.string, FEEDBACK)
return True
class SayOnNoun(Say):
def __init__(self, string, noun, name = ""):
Say.__init__(self, string, name)
self.noun = noun
def act(self, actor, noun, words):
if self.noun != noun:
return False
self.bound_to.game.output(self.string, FEEDBACK)
return True
class SayOnSelf(SayOnNoun):
def __init__(self, string, name = ""):
SayOnNoun.__init__(self, string, None, name)
# Verb is used for passing in an unbound global function to the constructor
class Verb(BaseVerb):
def __init__(self, function, name = ""):
BaseVerb.__init__(self, function, name)
# explicitly pass in self to the unbound function
def act(self, actor, noun, words):
return self.function(self.bound_to, actor, noun, words)
def list_prefix(a, b): # is a a prefix of b
if not a:
return True
if not b:
return False
if a[0] != b[0]:
return False
return list_prefix(a[1:], b[1:])
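# Illustrative note (added): list_prefix(['pick', 'up'], ['pick', 'up', 'axe'])
# is True; an empty list is a prefix of anything.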
def get_noun(words, things):
if words[0] in articles:
if len(words) > 1:
done = False
for t in things:
n = t.name.split()
if list_prefix(n, words[1:]):
noun = t.name
words = words[len(n)+1:]
done = True
break
if not done:
noun = words[1]
words = words[2:]
else:
done = False
for t in things:
n = t.name.split()
if list_prefix(n, words):
noun = t.name
words = words[len(n):]
done = True
break
if not done:
noun = words[0]
words = words[1:]
return (noun, words)
# A class to hold utility methods useful during game development, but
# not needed for normal game play. Import the advent_devtools module
# to get the full version of the tools.
class DevToolsBase(object):
def __init__(self):
self.game = None
def set_game(self, game):
self.game = game
def debug_output(self, text, level):
return
def start(self):
return
global _devtools
_devtools = DevToolsBase()
def register_devtools(devtools):
global _devtools
_devtools = devtools
# The Game: container for hero, locations, robots, animals etc.
class Game(Base):
def __init__(self, name="bwx-adventure"):
Base.__init__(self, name)
self.objects = {}
self.fresh_location = False
self.player = None
self.current_actor = None
self.location_list = []
self.robots = {}
self.animals = {}
global _devtools
self.devtools = _devtools
self.devtools.set_game(self)
self.http_output = False
self.http_text = ""
self.done = False
def set_name(self, name):
self.name = name
# add a bidirectional connection between points A and B
def add_connection(self, connection):
connection.game = self
if isinstance(connection.way_ab, (list, tuple)):
for way in connection.way_ab:
connection.point_a.add_exit(connection, way)
else:
connection.point_a.add_exit(connection, connection.way_ab)
# this is messy, need a better way to do this
reverse_connection = Connection(connection.name,
connection.point_b,
connection.point_a,
connection.way_ba,
connection.way_ab)
reverse_connection.game = self
if isinstance(connection.way_ba, (list, tuple)):
for way in connection.way_ba:
connection.point_b.add_exit(reverse_connection, way)
else:
connection.point_b.add_exit(reverse_connection, connection.way_ba)
return connection
def
import math
from datetime import datetime
import torch
from torch.nn.functional import mse_loss, binary_cross_entropy_with_logits
from torch import optim
from .loss import mask_nll_loss, bpr_loss, lambda_rank_loss, rank_hinge_loss
from .data import ReviewGroupDataLoader, basic_builder, ReviewBuilder
from .evaluate import test_rate_ndcg, test_review_bleu, test_rate_rmse
from .search_decoder import SearchDecoder, GumbelDecoder
from .voc import voc
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
class AbstractTrainer:
''' Abstract Trainer Pipeline '''
def __init__(
self,
model,
ckpt_mng,
batch_size=64,
lr=.01,
l2=0,
clip=1.,
patience=5,
max_iters=None,
save_every=5,
grp_config=None
):
self.model = model
self.ckpt_mng = ckpt_mng
self.batch_size = batch_size
self.optimizer = optim.Adam(
model.parameters(),
lr=lr,
weight_decay=l2
)
self.clip = clip
# trained epochs
self.trained_epoch = 0
self.train_results = []
self.val_results = []
self.best_epoch = self._best_epoch()
self.collate_fn = basic_builder
self.patience = patience
self.max_iters = float('inf') if max_iters is None else max_iters
self.save_every = save_every
self.ckpt_name = lambda epoch: str(epoch)
self.grp_config = grp_config
# self.optim_scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.5)
def log(self, *args):
'''formatted log output for training'''
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f'{time} ', *args)
def resume(self, checkpoint):
'''load checkpoint'''
self.trained_epoch = checkpoint['epoch']
self.train_results = checkpoint['train_results']
self.val_results = checkpoint['val_results']
self.optimizer.load_state_dict(checkpoint['opt'])
self.best_epoch = self._best_epoch()
def reset_epoch(self):
self.trained_epoch = 0
self.train_results = []
self.val_results = []
self.best_epoch = self._best_epoch()
def run_batch(self, training_batch, val=False):
'''
Run a batch of any batch size with the model
Inputs:
training_batch: train data batch created by batch_2_seq
val: if it is for validation, no backward & optim
Outputs:
result: tuple (loss, *other_stats) of numbers or element tensor
loss: a loss tensor to optimize
other_stats: any other values to accumulate
'''
pass
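# Editorial sketch (added; hypothetical subclass, not in this file): a minimal
# concrete run_batch for a plain regression model could be
#   def run_batch(self, batch, val=False):
#       pred = self.model(batch.inputs)
#       return mse_loss(pred, batch.targets)
# run_epoch only assumes element 0 of the (tuple) result is the loss tensor
# to backpropagate; any further elements are accumulated for logging.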
def run_epoch(self, train_data, dev_data):
trainloader = ReviewGroupDataLoader(train_data, collate_fn=self.collate_fn, grp_config=self.grp_config, batch_size=self.batch_size, shuffle=True, num_workers=4)
# maximum iteration per epoch
iter_len = min(self.max_iters, len(trainloader))
# calculate the print interval so there are around 5 progress logs per epoch
PRINT_EVERY = 10 ** round(math.log10(iter_len / 5))
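# e.g. iter_len = 500 gives PRINT_EVERY = 10 ** round(log10(100)) = 100,
# i.e. 5 progress lines per epoch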
while True:
epoch = self.trained_epoch + 1
self.model.train()
results_sum = []
for idx, training_batch in enumerate(trainloader):
if idx >= iter_len:
break
# run a training iteration with batch
training_batch.to(DEVICE)
batch_result = self.run_batch(training_batch)
if type(batch_result) != tuple:
batch_result = (batch_result,)
loss = batch_result[0]
self.optimizer.zero_grad()
loss.backward()
# Clip gradients: gradients are modified in place
if self.clip:
_ = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
# Adjust model weights
self.optimizer.step()
# Accumulate results
self._accum_results(results_sum, batch_result)
# Print progress
iteration = idx + 1
if iteration % PRINT_EVERY == 0:
print_result = self._sum_to_result(results_sum, iteration)
self.log('Epoch {}; Iter: {} {:.1f}%; {};'.format(epoch, iteration, iteration / iter_len * 100, self._result_to_str(print_result)))
epoch_result = self._sum_to_result(results_sum, iteration)
self.train_results.append(epoch_result)
# validation
with torch.no_grad():
self.model.eval()
val_result = self.validate(dev_data)
self.model.train()
self.log('Validation; Epoch {}; {};'.format(epoch, self._result_to_str(val_result)))
self.val_results.append(val_result)
# new best if no prev best or the sort key is smaller than prev best's
is_new_best = self.best_epoch is None or \
self._result_sort_key(val_result) < self._result_sort_key(self.val_results[self.best_epoch-1])
self._handle_ckpt(epoch, is_new_best)
self.trained_epoch += 1
if is_new_best:
self.best_epoch = epoch
# self.optim_scheduler.step()
yield is_new_best
def train(self, train_data, dev_data):
patience = self.patience # end the function when reaching threshold
epoch = self.trained_epoch + 1
# Data loaders with custom batch builder
self.log(f'Start training from epoch {epoch}...')
run_epoch = self.run_epoch(train_data, dev_data)
while patience:
is_new_best = next(run_epoch)
# if better than before, recover patience; otherwise, lose patience
if is_new_best:
patience = self.patience
else:
patience -= 1
best_result = self.val_results[self.best_epoch-1]
self.log('Training ends: best result {} at epoch {}'.format(self._result_to_str(best_result), self.best_epoch))
def validate(self, dev_data):
devloader = ReviewGroupDataLoader(dev_data, collate_fn=self.collate_fn, grp_config=self.grp_config, batch_size=self.batch_size, shuffle=False)
results_sum = []
for dev_batch in devloader:
dev_batch.to(DEVICE)
result = self.run_batch(dev_batch, val=True)
if type(result) != tuple:
result = (result,)
# Accumulate results
self._accum_results(results_sum, result)
return self._sum_to_result(results_sum, len(devloader))
def _result_to_str(self, epoch_result):
''' convert result list to readable string '''
return 'Loss: {:.4f}'.format(epoch_result)
def _sum_to_result(self, results_sum, length):
'''
Convert accumulated sum of results to epoch result
by default return the average batch loss
'''
loss_sum = results_sum[0]
return loss_sum / length
def _accum_results(self, results_sum, batch_result):
''' accumulate batch result of run batch '''
while len(results_sum) < len(batch_result):
results_sum.append(0)
for i, val in enumerate(batch_result):
results_sum[i] += val.item() if torch.is_tensor(val) else val
def _result_sort_key(self, result):
''' return the sorting value of a result, the smaller the better '''
return result
def _best_epoch(self):
'''
get the epoch of best result, smallest sort key value, from results savings when resumed from checkpoint
'''
best_val, best_epoch = math.inf, None
for i, result in enumerate(self.val_results):
val = self._result_sort_key(result)
if val < best_val:
best_val = val
best_epoch = i + 1
return best_epoch
def _handle_ckpt(self, epoch, is_new_best):
'''
Always save a checkpoint for the latest epoch
Remove the checkpoint for the previous epoch
If the latest is the new best record, remove the previous best
Regular saves are exempted from removes
'''
# save new checkpoint
cp_name = self.ckpt_name(epoch)
self.ckpt_mng.save(cp_name, {
'epoch': epoch,
'train_results': self.train_results,
'val_results': self.val_results,
'model': self.model.state_dict(),
'opt': self.optimizer.state_dict()
}, best=is_new_best)
self.log('Save checkpoint:', cp_name)
epochs_to_purge = []
# remove previous non-best checkpoint
prev_epoch = epoch - 1
if prev_epoch != self.best_epoch:
epochs_to_purge.append(prev_epoch)
# remove previous best checkpoint
if is_new_best and self.best_epoch:
epochs_to_purge.append(self.best_epoch)
for e in epochs_to_purge:
if e % self.save_every != 0:
cp_name = self.ckpt_name(e)
self.ckpt_mng.delete(cp_name)
self.log('Delete checkpoint:', cp_name)
class RankerTrainer(AbstractTrainer):
''' Trainer to train recommendation ranking model '''
def __init__(self, *args, rank_loss_type=None, loss_lambda=None, **kargs):
super().__init__(*args, **kargs)
self.rank_loss_type = rank_loss_type
self.loss_lambda = loss_lambda
if rank_loss_type:
self.rank_loss_fn = {
'RankHinge': rank_hinge_loss,
'BPR': bpr_loss,
'LambdaRank': lambda_rank_loss
}[rank_loss_type]
def run_batch(self, batch_data, val=False):
'''
Outputs:
loss: tensor, overall loss to optimize
'''
rate_lam, rank_lam = self.loss_lambda['rate'], self.loss_lambda['rank']
# extract fields from batch & set DEVICE options
scores = batch_data.scores
pred_scores = self.model.rate(batch_data)
if self.rank_loss_type:
grp_size = batch_data.grp_size
pred_scores, scores = (t.view(-1, grp_size) for t in (pred_scores, scores))
# only apply mse to rated items
rate_loss = mse_loss(pred_scores[:, 0], scores[:, 0])
rank_loss = self.rank_loss_fn(pred_scores, scores)
else:
rate_loss = mse_loss(pred_scores, scores)
rank_loss = 0
loss = rate_lam * rate_loss + rank_lam * rank_loss
return loss
def validate(self, dev_data):
if self.rank_loss_type == 'LambdaRank':
# loss is pointless in LambdaRank
val_result = 0.
else:
val_result = super().validate(dev_data)
rmse = test_rate_rmse(dev_data, self.model, builder=self.collate_fn, batch_size=self.batch_size)
ndcg, pure_ndcg = test_rate_ndcg(dev_data, self.model, builder=self.collate_fn, batch_size=self.batch_size // 16)
return val_result, rmse, ndcg, pure_ndcg
def _result_to_str(self, epoch_result):
if type(epoch_result) == tuple:
s = 'LOSS: {:.4f}; RMSE: {:.4f}; NDCG: {:.4f}; P_NDCG: {:.4f}'.format(*epoch_result)
else:
s = 'LOSS: {:.4f}'.format(epoch_result)
return s
def _result_sort_key(self, result):
''' MSE loss '''
return result[1]
class ReviewTrainer(AbstractTrainer):
''' Trainer to train review generation model '''
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
self.collate_fn = ReviewBuilder(need_scores=False)
model = self.model
for module in [model.user_ebd, model.item_ebd, model.ui_mlp, model.rater]:
for parameter in module.parameters():
parameter.requires_grad = False
def run_batch(self, batch_data, val=False):
'''
Inputs:
batch_data: (users, items, scores, words, mask)
users: (batch_size, grp_size)
items: (batch_size, grp_size)
scores: (batch_size, grp_size)
words: (seq_size, batch_size * grp_size)
mask: (seq_size, batch_size * grp_size)
Outputs:
loss: tensor, overall loss to optimize
loss_sum: number, sum of the loss
n_words: number of words
'''
words, mask = batch_data.words, batch_data.mask
# concat sos at the top & remove eos at the bottom
sos_var = torch.full((1, words.size(-1)), voc.sos_idx, dtype=torch.long, device=DEVICE)
inp = torch.cat([sos_var, words[:-1]])
output_dict = self.model.review(batch_data, inp)
review_loss = mask_nll_loss(output_dict.output, words, mask)
sen_l1_loss = output_dict.rate_gates.masked_select(mask).mean()
review_loss += 5e-3 * sen_l1_loss
n_words = mask.sum().item()
return review_loss, review_loss.item() * n_words, n_words
def train(self, train_data, dev_data):
train_data, dev_data = train_data.rvw_subset(), dev_data.rvw_subset()
return super().train(train_data, dev_data)
def _sum_to_result(self, results_sum, length):
loss_sum = results_sum[1]
n_words = results_sum[2]
return loss_sum / n_words
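# Editorial note (added): loss_sum / n_words is the average negative
# log-likelihood per word; math.exp of this value would be the perplexity.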
def validate(self, dev_data):
val_result = super().validate(dev_data)
# TODO: replace hardcoded searcher
bleu2, bleu4, _ = test_review_bleu(dev_data.random_subset(10 ** 4), SearchDecoder(self.model, voc, max_length=30, greedy=False, topk=10))
return (val_result, bleu2, bleu4)
def _result_to_str(self, epoch_result):
if type(epoch_result) == tuple:
loss, rest = epoch_result[0], epoch_result[1:]
else:
loss, rest = epoch_result, None
s = 'Loss: {:.4f}'.format(loss)
if rest:
s += '; BLEU2: {:.4f}; BLEU4: {:.4f}'.format(*rest)
return s
def _result_sort_key(self, result):
''' review loss '''
return result[0]
class MultiTaskTrainer(RankerTrainer):
''' Trainer to train multi-task model '''
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
self.collate_fn = ReviewBuilder()
def run_batch(self, batch_data, val=False):
'''
Outputs:
loss: tensor, overall loss to optimize
rate_loss: number, recomm loss
review_loss: number, review loss
n_words
'''
review_lam = self.loss_lambda['review']
words, mask = batch_data.words, batch_data.mask
# concat sos at the top & remove eos at the bottom
sos_var = torch.full((1, words.size(-1)), voc.sos_idx, dtype=torch.long, device=DEVICE)
inp = torch.cat([sos_var, words[:-1]])
range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(SparkShotReceived)
SparkShotReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(SparkShotReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
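# Editorial note (added): every palette ladder in this file maps the slot
# index y (0-7) to the byte 0x30 + y, so an equivalent one-liner would be
#   Value = bytes([0x30 + y])
# left as a comment so the surrounding if/elif chains stay untouched.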
elif weapons[y] == Snakebyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(SearchSnakeReceived)
SearchSnakeReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(SearchSnakeReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Needlebyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(NeedleCannonReceived)
NeedleCannonReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(NeedleCannonReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Hardbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(HardKnuckleReceived)
HardKnuckleReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 4:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(HardKnuckleReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Topbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
for x in range(len(TopSpinReceived)):#Writes second part
ROM.write(TopSpinReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Geminibyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(GeminiLaserReceived)
GeminiLaserReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(GeminiLaserReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Magnetbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(MagnetMissileReceived)
MagnetMissileReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(MagnetMissileReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif weapons[y] == Shadowbyte:
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(ShadowBladeReceived)
ShadowBladeReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(ShadowBladeReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y ==
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torchvision
from tqdm import tqdm
from data.modelnet_loader_torch import ModelNetCls
from models import pcrnet
from src import ChamferDistance, FPSSampler, RandomSampler, SampleNet
from src import sputils
from src.pctransforms import OnUnitCube, PointcloudToTensor
from src.qdataset import QuaternionFixedDataset, QuaternionTransform, rad_to_deg
torch.manual_seed(0)
# addpath('../')
# sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
# dump to GLOBALS dictionary
GLOBALS = None
def append_to_GLOBALS(key, value):
try:
GLOBALS[key].append(value)
except KeyError:
GLOBALS[key] = []
GLOBALS[key].append(value)
# fmt: off
def options(argv=None, parser=None):
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--outfile', required=True, type=str,
metavar='BASENAME', help='output filename (prefix)') # the result: ${BASENAME}_model_best.pth
parser.add_argument('--datafolder', required=True, type=str, help='dataset folder')
# For testing
parser.add_argument('--test', action='store_true',
help='Perform testing routine. Otherwise, the script will train.')
# Default pointnet behavior is 'fixed'.
# Loading options:
# --transfer-from: load a pretrained PCRNET model.
# --resume: load an ongoing training SP-PCRNET model.
# --pretrained: load a pretrained SP-PCRNET model (like resume, but reset starting epoch)
parser.add_argument('--loss-type', default=0, choices=[0, 1], type=int,
metavar='TYPE', help='Supervised (0) or Unsupervised (1)')
parser.add_argument('--sampler', required=True, choices=['fps', 'samplenet', 'random', 'none'], type=str,
help='Sampling method.')
parser.add_argument('--transfer-from', type=str,
metavar='PATH', help='path to trained pcrnet')
parser.add_argument('--train-pcrnet', action='store_true',
help='Allow PCRNet training.')
parser.add_argument('--train-samplenet', action='store_true',
help='Allow SampleNet training.')
parser.add_argument('--num-sampled-clouds', choices=[1, 2], type=int, default=2,
help='Number of point clouds to sample (Source / Source + Template)')
# settings for on training
parser.add_argument('--workers', default=4, type=int,
metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=32, type=int,
metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--epochs', default=400, type=int,
metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int,
metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD', 'RMSProp'],
metavar='METHOD', help='name of an optimizer (default: Adam)')
parser.add_argument('--resume', default='', type=str,
metavar='PATH', help='path to latest checkpoint (default: null (no-use))')
parser.add_argument('--pretrained', default='', type=str,
metavar='PATH', help='path to pretrained model file (default: null (no-use))')
parser.add_argument('--device', default='cuda:0', type=str,
metavar='DEVICE', help='use CUDA if available')
args = parser.parse_args(argv)
return args
# fmt: on
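# Illustrative invocations (script name and paths are hypothetical; SampleNet
# hyperparameter flags such as --alpha/--lmbda, read by Action below, are
# presumably added by the caller through the `parser` argument):
#   python main.py -o log/baseline --datafolder <data> --sampler none --train-pcrnet
#   python main.py -o log/sn --datafolder <data> --sampler samplenet \
#       --train-samplenet --transfer-from log/baseline_model_best.pth
#   python main.py -o log/sn --datafolder <data> --sampler samplenet --test \
#       --pretrained log/sn_model_best.pth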
def main(args, dbg=False):
global GLOBALS
if dbg:
GLOBALS = {}
trainset, testset = get_datasets(args)
action = Action(args)
if args.test:
test(args, testset, action)
else:
train(args, trainset, testset, action)
return GLOBALS
def test(args, testset, action):
if not torch.cuda.is_available():
args.device = "cpu"
args.device = torch.device(args.device)
model = action.create_model()
# action.try_transfer(model, args.pretrained)
if args.pretrained:
assert os.path.isfile(args.pretrained)
model.load_state_dict(torch.load(args.pretrained, map_location="cpu"))
model.to(args.device)
model.eval() # Batch norms etc. configured for testing mode.
# Dataloader
testloader = torch.utils.data.DataLoader(
testset, batch_size=1, shuffle=False, num_workers=args.workers
)
action.test_1(model, testloader, args.device, epoch=0)
def train(args, trainset, testset, action):
if not torch.cuda.is_available():
args.device = "cpu"
args.device = torch.device(args.device)
model = action.create_model()
# action.try_transfer(model, args.pretrained)
if args.pretrained:
assert os.path.isfile(args.pretrained)
model.load_state_dict(torch.load(args.pretrained, map_location="cpu"))
model.to(args.device)
checkpoint = None
if args.resume:
assert os.path.isfile(args.resume)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint["epoch"]
model.load_state_dict(checkpoint["model"])
# dataloader
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers
)
# Optimizer
min_loss = float("inf")
learnable_params = filter(lambda p: p.requires_grad, model.parameters())
if args.optimizer == "Adam":
optimizer = torch.optim.Adam(learnable_params, lr=1e-3)
elif args.optimizer == "RMSProp":
optimizer = torch.optim.RMSprop(learnable_params, lr=0.001)
else:
optimizer = torch.optim.SGD(learnable_params, lr=0.001, momentum=0.9)
if checkpoint is not None:
min_loss = checkpoint["min_loss"]
optimizer.load_state_dict(checkpoint["optimizer"])
# training
LOGGER.debug("train, begin")
for epoch in range(args.start_epoch, args.epochs):
train_loss, train_rotation_error = action.train_1(
model, trainloader, optimizer, args.device, epoch
)
val_loss, val_rotation_error = action.eval_1(
model, testloader, args.device, epoch
)
# scheduler.step()
is_best = val_loss < min_loss
min_loss = min(val_loss, min_loss)
LOGGER.info(
"epoch, %04d, train_loss=%f, train_rotation_error=%f, val_loss=%f, val_rotation_error=%f",
epoch + 1,
train_loss,
train_rotation_error,
val_loss,
val_rotation_error,
)
snap = {
"epoch": epoch + 1,
"model": model.state_dict(),
"min_loss": min_loss,
"optimizer": optimizer.state_dict(),
}
if is_best:
save_checkpoint(snap, args.outfile, "snap_best")
save_checkpoint(model.state_dict(), args.outfile, "model_best")
save_checkpoint(snap, args.outfile, "snap_last")
save_checkpoint(model.state_dict(), args.outfile, "model_last")
LOGGER.debug("train, end")
def save_checkpoint(state, filename, suffix):
torch.save(state, "{}_{}.pth".format(filename, suffix))
class Action:
def __init__(self, args):
self.experiment_name = args.pretrained
self.transfer_from = args.transfer_from
self.p0_zero_mean = True
self.p1_zero_mean = True
self.LOSS_TYPE = args.loss_type
# SampleNet:
self.ALPHA = args.alpha # Sampling loss
self.LMBDA = args.lmbda # Projection loss
self.GAMMA = args.gamma # Inside sampling loss - linear.
self.DELTA = args.delta # Inside sampling loss - point cloud size factor.
self.NUM_IN_POINTS = args.num_in_points
self.NUM_OUT_POINTS = args.num_out_points
self.BOTTLENECK_SIZE = args.bottleneck_size
self.GROUP_SIZE = args.projection_group_size
self.SKIP_PROJECTION = args.skip_projection
self.SAMPLER = args.sampler
self.TRAIN_SAMPLENET = args.train_samplenet
self.TRAIN_PCRNET = args.train_pcrnet
self.NUM_SAMPLED_CLOUDS = args.num_sampled_clouds
def create_model(self):
# Create Task network and load pretrained feature weights if requested
pcrnet_model = pcrnet.PCRNet(input_shape="bnc")
if self.TRAIN_PCRNET:
pcrnet_model.requires_grad_(True)
pcrnet_model.train()
else:
pcrnet_model.requires_grad_(False)
pcrnet_model.eval()
# Create sampling network
if self.SAMPLER == "samplenet":
sampler = SampleNet(
num_out_points=self.NUM_OUT_POINTS,
bottleneck_size=self.BOTTLENECK_SIZE,
group_size=self.GROUP_SIZE,
initial_temperature=1.0,
input_shape="bnc",
output_shape="bnc",
skip_projection=self.SKIP_PROJECTION,
)
if self.TRAIN_SAMPLENET:
sampler.requires_grad_(True)
sampler.train()
else:
sampler.requires_grad_(False)
sampler.eval()
elif self.SAMPLER == "fps":
sampler = FPSSampler(
self.NUM_OUT_POINTS, permute=True, input_shape="bnc", output_shape="bnc"
)
elif self.SAMPLER == "random":
sampler = RandomSampler(
self.NUM_OUT_POINTS, input_shape="bnc", output_shape="bnc"
)
else:
sampler = None
# Load pcrnet baseline weights
self.try_transfer(pcrnet_model, self.transfer_from)
# Attach sampler to pcrnet_model
pcrnet_model.sampler = sampler
return pcrnet_model
@staticmethod
def try_transfer(model, path):
if path is not None:
model.load_state_dict(torch.load(path, map_location="cpu"))
LOGGER.info(f"Model loaded from {path}")
def train_1(self, model, trainloader, optimizer, device, epoch):
vloss = 0.0
gloss = 0.0
count = 0
for i, data in enumerate(tqdm(trainloader)):
# Sample using one of the samplers:
if model.sampler is not None and model.sampler.name == "samplenet":
(
sampler_loss,
sampled_data,
sampler_loss_info,
) = self.compute_samplenet_loss(model, data, device)
simplification_loss = sampler_loss_info["simplification_loss"]
projection_loss = sampler_loss_info["projection_loss"]
elif model.sampler is not None and model.sampler.name == "fps":
sampled_data = self.non_learned_sampling(model, data, device)
simplification_loss = torch.tensor(0, dtype=torch.float32)
projection_loss = torch.tensor(0, dtype=torch.float32)
sampler_loss = torch.tensor(0, dtype=torch.float32)
else:
sampled_data = data
simplification_loss = torch.tensor(0, dtype=torch.float32)
projection_loss = torch.tensor(0, dtype=torch.float32)
sampler_loss = torch.tensor(0, dtype=torch.float32)
pcrnet_loss, pcrnet_loss_info = self.compute_pcrnet_loss(
model, sampled_data, device, epoch
)
chamfer_loss = pcrnet_loss_info["chamfer_loss"]
rotation_error = pcrnet_loss_info["rot_err"]
norm_err = pcrnet_loss_info["norm_err"]
trans_err = pcrnet_loss_info["trans_err"]
# print(
# f"data sample {i:3.0f}: simplification_loss={simplification_loss:.4f}, projection_loss={projection_loss:.4f}, chamfer_loss={chamfer_loss:.4f}, rotation_error={rotation_error:.4f}, norm_err={norm_err:.4f}, trans_err={trans_err:.4f}"
# )
# SampleNet loss is already factorized by ALPHA and LMBDA hyper parameters.
loss = pcrnet_loss + sampler_loss
optimizer.zero_grad()
loss.backward()
# grad_norm = torch.nn.utils.clip_grad_norm_(optimizer.param_groups[0]['params'], max_norm=10.0)
optimizer.step()
vloss1 = loss.item()
vloss += vloss1
gloss1 = rotation_error.item()
gloss += gloss1
count += 1
ave_vloss = float(vloss) / count
ave_gloss = float(gloss) / count
return ave_vloss, ave_gloss
def eval_1(self, model, testloader, device, epoch):
vloss = 0.0
gloss = 0.0
# Shift to eval mode for BN / Projection layers
task_state = model.training
if model.sampler is not None:
sampler_state = model.sampler.training
model.eval()
count = 0
with torch.no_grad():
for i, data in enumerate(testloader):
# Sample using one of the samplers:
if model.sampler is not None and model.sampler.name == "samplenet":
(
sampler_loss,
sampled_data,
sampler_loss_info,
) = self.compute_samplenet_loss(model, data, device)
elif model.sampler is not None and model.sampler.name == "fps":
sampled_data = self.non_learned_sampling(model, data, device)
sampler_loss = torch.tensor(0, dtype=torch.float32)
else:
sampled_data = data
sampler_loss = torch.tensor(0, dtype=torch.float32)
pcrnet_loss, pcrnet_loss_info = self.compute_pcrnet_loss(
model, sampled_data, device, epoch
)
rotation_error = pcrnet_loss_info["rot_err"]
# samplenet loss is already factorized by ALPHA and LMBDA hyper parameters.
loss = pcrnet_loss + sampler_loss
vloss1 = loss.item()
vloss += vloss1
gloss1 = rotation_error.item()
gloss += gloss1
count += 1
ave_vloss = float(vloss) / count
ave_gloss = float(gloss) / count
# Shift back to the previous training mode for task and sampler
model.train(task_state)
if model.sampler is not None:
model.sampler.train(sampler_state)
return ave_vloss, ave_gloss
def test_1(self, model, testloader, device, epoch):
rotation_errors = []
trans_errs = []
consistency_errors = []
with torch.no_grad():
for i, data_and_shape in enumerate(tqdm(testloader)):
data = data_and_shape[0:3]
shape = data_and_shape[3]
# Sample using one of the samplers:
if model.sampler is not None and model.sampler.name == "samplenet":
_, sampled_data, _ = self.compute_samplenet_loss(
model, data, device
)
elif model.sampler is not None and (
model.sampler.name in ["fps", "random"]
):
sampled_data = self.non_learned_sampling(model, data, device)
else:
sampled_data = data
_, pcrnet_loss_info = self.compute_pcrnet_loss(
model, sampled_data, device, epoch
)
consistency = self.compute_sampling_consistency(sampled_data, device)
consistency_errors.append(consistency.item())
rotation_error = pcrnet_loss_info["rot_err"]
trans_err = pcrnet_loss_info["trans_err"]
rotation_errors.append(rotation_error.item())
trans_errs.append(trans_err.item())
if GLOBALS is not None:
append_to_GLOBALS("data", data)
append_to_GLOBALS("rotation_error", rotation_error)
append_to_GLOBALS("sampled_data", sampled_data)
append_to_GLOBALS(
"est_transform", pcrnet_loss_info["est_transform"]
)
append_to_GLOBALS("shape", shape)
# Compute Precision curve and AUC.
rotation_errors = np.array(rotation_errors)
trans_errs = np.array(trans_errs)
consistency_errors = np.array(consistency_errors)
n_samples = len(testloader)
x = np.arange(0.0, 180.0, 0.5)
y = np.zeros(len(x))
for idx, err in enumerate(x):
precision = np.sum(rotation_errors <= err) / n_samples
y[idx] = precision
# plt.figure()
# plt.plot(x, y)
# plt.show()
# plt.savefig("test.png")
auc = np.sum(y) / len(x)
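# Equivalently, this AUC is the mean of the precision curve sampled every 0.5 degrees over [0, 180).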
print(f"Experiment name: {self.experiment_name}")
print(f"AUC = {auc}")
print(f"Mean rotation Error = {np.mean(rotation_errors)}")
print(f"STD rotation Error = {np.std(rotation_errors)}")
print(f"Mean consistency Error = | |
0x3e, 0xaf, 0x87,
0xf7, 0x34, 0x2e, 0xef, 0x8a, 0xea, 0x68, 0xfe,
0xf2, 0xf2, 0x8e, 0x32, 0x2b, 0x99, 0xcf, 0xed,
0xfd, 0x82, 0x8e, 0xcf, 0xec, 0xfc, 0xde, 0xda,
0xcf, 0xed, 0xbc, 0x7a, 0xff, 0xec, 0xbc, 0xaf,
0x41, 0xfd, 0xcc, 0x5c, 0x88, 0x9b, 0x94, 0x80,
0x01, 0x00, 0x18, 0x84, 0x18, 0x48, 0x00, 0x60,
0x81, 0x00, 0x40, 0x08, 0x12, 0xa0, 0x41, 0x42,
0x20, 0x04, 0x00, 0x80, 0x21, 0x04, 0x83, 0x04,
0x81, 0x2a, 0x01, 0x2a, 0x01, 0x2a, 0x01, 0x18,
0x80, 0xe1, 0x81, 0x24, 0x42, 0x28, 0x22, 0x08,
0x82, 0x00, 0x20, 0x01, 0x00, 0x80, 0x02, 0x6d,
0x93, 0xa0, 0x48, 0x00, 0x84, 0x42, 0x00, 0x84,
0x43, 0x44, 0x08, 0x48, 0xc0, 0x48, 0x4a, 0x01,
0x42, 0x84, 0x81, 0x12, 0xa9, 0x01, 0x83, 0x04,
0x85, 0x11, 0x11, 0x88, 0x04, 0x22, 0x20, 0x22,
0x04, 0x19, 0x02, 0x19, 0x02, 0x20, 0xe4, 0x81,
0x22, 0x04, 0x42, 0x80, 0xa8, 0x41, 0x20, 0x01,
0x84, 0x89, 0x08, 0xa8, 0x32, 0x88, 0x12, 0x88,
0x22, 0x47, 0x74, 0xf0, 0x22, 0x42, 0x1f, 0x41,
0x72, 0x2c, 0xf3, 0x11, 0x2c, 0x87, 0x16, 0x1f,
0x41, 0x32, 0x6e, 0x1d, 0x2c, 0xef, 0x26, 0xd1,
0xc1, 0xf2, 0x4a, 0x92, 0x1f, 0x81, 0xb2, 0x4a,
0x79, 0x15, 0xf8, 0x4a, 0x92, 0x47, 0x82, 0xaf,
0x54, 0x7b, 0x24, 0xf8, 0x4a, 0xb1, 0x47, 0x82,
0xaf, 0x14, 0xfb, 0x25, 0x48, 0x8d, 0xb1, 0x5f,
0xa2, 0xc4, 0xa1, 0x5f, 0xa2, 0xf4, 0x18, 0x21,
0x5f, 0xa2, 0x74, 0xd8, 0xf1, 0x21, 0x4a, 0xd6,
0xc5, 0x4a, 0x9e, 0x24, 0xac, 0xf4, 0x91, 0x24,
0x4d, 0x43, 0x1f, 0x49, 0x52, 0x3a, 0x1f, 0x49,
0x32, 0x48, 0x1f, 0xc9, 0x76, 0x4a, 0xf3, 0x81,
0x24, 0xab, 0x14, 0x1f, 0x41, 0xb6, 0x4a, 0xf9,
0x11, 0x28, 0xab, 0x94, 0x55, 0xb8, 0x4a, 0x79,
0x34, 0xb8, 0x4a, 0x79, 0x24, 0xf8, 0x42, 0x92,
0x47, 0x83, 0x2f, 0x44, 0xf9, 0x34, 0x48, 0x4a,
0xf9, 0x24, 0x4a, 0x7e, 0xcc, 0x40, 0xe2, 0xc1,
0x54, 0x28, 0x1a, 0x14, 0x78, 0x11, 0x34, 0x42,
0x11, 0x23, 0x14, 0xd1, 0x48, 0x98, 0x21, 0x8f,
0x44, 0x1a, 0x95, 0xa2, 0xc0, 0x81, 0x41, 0x2f,
0x14, 0x19, 0xd4, 0x12, 0x59, 0x86, 0x1c, 0xd8,
0xa6, 0x85, 0xd8, 0xa4, 0xad, 0x81, 0x4f, 0xa2,
0x64, 0x18, 0x26, 0x62, 0x58, 0xb4, 0x9e, 0x24,
0xbc, 0xe4, 0xc9, 0x82, 0xe4, 0x89, 0x46, 0xf2,
0x91, 0x64, 0x81, 0x1f, 0x49, 0x32, 0x4a, 0x17,
0x48, 0x89, 0xd1, 0x41, 0xf2, 0x4a, 0x14, 0x19,
0xf2, 0x42, 0xa4, 0x51, 0x2b, 0xb4, 0x43, 0xb2,
0x42, 0x31, 0x24, 0x2b, 0x14, 0x26, 0x98, 0x11,
0x67, 0x82, 0x88, 0x2f, 0xa2, 0xf1, 0xe4, 0xfc,
0x00, 0x80, 0x04, 0x00, 0x00, 0x00, 0x00, 0x80,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x08,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x14,
0x22, 0xdf, 0xa7, 0x0f, 0x11, 0x59, 0x11, 0x28,
0x62, 0xc1, 0x93, 0x22, 0xd3, 0x21, 0x41, 0x19,
0x71, 0x18, 0xc2, 0x11, 0x42, 0x18, 0x45, 0x78,
0x48, 0xc2, 0x41, 0x9c, 0x16, 0x02, 0x4e, 0x21,
0x89, 0x45, 0xe4, 0x88, 0x04, 0x24, 0x84, 0x46,
0xc2, 0x43, 0x42, 0x12, 0x24, 0x42, 0x00, 0x00,
0x97, 0x48, 0x31, 0x25, 0x69, 0x26, 0x2c, 0x81,
0x92, 0x11, 0x88, 0x83, 0x92, 0x52, 0x99, 0x24,
0x1c, 0x32, 0x88, 0x18, 0x24, 0x4b, 0x12, 0x24,
0x49, 0x31, 0x24, 0x18, 0xf0, 0xb8, 0xf6, 0x24,
0x42, 0x23, 0xc8, 0x14, 0x77, 0x21, 0x48, 0x6d,
0x12, 0x30, 0x44, 0x90, 0x94, 0x21, 0x83, 0x04,
0x15, 0x84, 0x44, 0x75, 0x84, 0xc8, 0x14, 0x19,
0x42, 0x5c, 0x82, 0x44, 0x42, 0x44, 0x62, 0x44,
0x2a, 0x54, 0xc2, 0x84, 0x85, 0x72, 0x24, 0x28,
0xdc, 0x84, 0x22, 0xb2, 0x46, 0x42, 0x02, 0x10,
0x84, 0x34, 0x14, 0x8b, 0x42, 0x43, 0x11, 0x18,
0xb4, 0x82, 0x54, 0x18, 0xd0, 0x41, 0xa1, 0x48,
0x62, 0x29, 0xd4, 0x4a, 0xe8, 0x84, 0x78, 0x28,
0x34, 0x88, 0x81, 0x94, 0x8d, 0x26, 0x3f, 0xa1,
0x08, 0x13, 0x74, 0x53, 0x44, 0xf8, 0x16, 0x21,
0xcc, 0x34, 0x1e, 0x12, 0x4f, 0x14, 0x51, 0x12,
0x4d, 0x12, 0xc1, 0x25, 0x28, 0x66, 0x41, 0x4d,
0x48, 0x85, 0xd5, 0x1a, 0xc4, 0x7d, 0x50, 0x48,
0x15, 0x62, 0x41, 0x44, 0x9e, 0x44, 0x28, 0x86,
0x26, 0x54, 0xc2, 0x3d, 0x4a, 0x84, 0x43, 0x92,
0x41, 0x4d, 0x28, 0x2c, 0x92, 0x6c, 0x30, 0x42,
0x47, 0x88, 0x65, 0x91, 0x42, 0x47, 0x24, 0x78,
0x6b, 0x21, 0x84, 0x4d, 0x82, 0x4b, 0x4a, 0xb3,
0x64, 0x12, 0x4d, 0x11, 0x23, 0x3c, 0x68, 0x21,
0x86, 0xf6, 0xc8, 0x34, 0x85, 0xc6, 0x8a, 0x81,
0x16, 0x71, 0x38, 0xf6, 0x5d, 0x72, 0x10, 0x02,
0x49, 0x01, 0x29, 0x91, 0x21, 0x23, 0x02, 0x81,
0x13, 0x88, 0xb1, 0x11, 0x04, 0x11, 0x80, 0xc4,
0x84, 0xd0, 0x12, 0x02, 0xab, 0x24, 0x8c, 0x24,
0x44, 0x28, 0x04, 0x89, 0x82, 0x04, 0x2c, 0x34,
0x18, 0x2c, 0x28, 0x04, 0x00, 0x40, 0x01, 0x85,
0xa1, 0x21, 0x81, 0xc0, 0x11, 0x10, 0x28, 0x31,
0x48, 0x8a, 0x04, 0x82, 0x4e, 0x12, 0x84, 0x20,
0xc2, 0x14, 0x43, 0x12, 0x08, 0x5f, 0x66, 0x01,
0x14, 0x40, 0x01, 0x14, 0x20, 0x56, 0x81, 0x00,
0x25, 0x11, 0x51, 0x18, 0x12, 0x85, 0x13, 0xc4,
0x23, 0x48, 0x20, 0xc1, 0x21, 0x90, 0x28, 0x40,
0x01, 0x28, 0x21, 0x00, 0x40, 0x62, 0x41, 0x24,
0x16, 0x44, 0x02, 0x21, 0x50, 0x22, 0x84, 0x00,
0x00, 0x00, 0x40, 0xc8, 0x92, 0x41, 0x29, 0x78,
0x84, 0xc8, 0x82, 0x43, 0x88, 0x09, 0x42, 0xd0,
0x57, 0x06, 0x50, 0x12, 0x28, 0x14, 0x41, 0x41,
0x94, 0x61, 0x26, 0x18, 0xc6, 0x4a, 0x84, 0x48,
0x84, 0x21, 0x4e, 0x18, 0x8c, 0x84, 0x31, 0x23,
0xad, 0x62, 0xe0, 0x84, 0x01, 0x89, 0x92, 0x28,
0x21, 0x84, 0x8b, 0x12, 0x8d, 0x22, 0x2a, 0x21,
0x36, 0x12, 0xa0, 0x24, 0x00, 0x41, 0x83, 0x11,
0x04, 0x11, 0x8c, 0x11, 0x04, 0x84, 0x8e, 0x42,
0x1c, 0x31, 0x12, 0x33, 0x74, 0x42, 0x38, 0x18,
0x16, 0xf8, 0x12, 0x41, 0x48, 0x12, 0x48, 0x4c,
0xb8, 0x94, 0x22, 0xb2, 0xb2, 0x0c, 0x10, 0x13,
0x11, 0x11, 0x15, 0xb4, 0x48, 0x42, 0x9d, 0x28,
0x25, 0x74, 0x42, 0x41, 0x38, 0x48, 0x89, 0x11,
0x72, 0x5a, 0x34, 0x68, 0x42, 0x25, 0xc2, 0x18,
0x26, 0xa1, 0x41, 0x10, 0xc8, 0x2b, 0x90, 0x28,
0x16, 0x08, 0x20, 0x44, 0x32, 0x14, 0x22, 0x25,
0x04, 0x25, 0x42, 0x18, 0x04, 0x25, 0x48, 0x08,
0x10, 0xa8, 0x84, 0x14, 0x2c, 0x51, 0x29, 0x89,
0x21, 0x91, 0x9a, 0x27, 0x94, 0x48, 0x4c, 0x88,
0x25, 0x88, 0x84, 0xf2, 0x72, 0x8e, 0x40, 0x01,
0x22, 0x00, 0x50, 0x21, 0x00, 0x10, 0x58, 0x12,
0x11, 0x50, 0x18, 0x10, 0x31, 0x12, 0x19, 0xe2,
0x22, 0x04, 0x84, 0x89, 0x22, 0x02, 0x40, 0x22,
0xc2, 0x2a, 0x1a, 0x62, 0x22, 0x18, 0x80, 0x24,
0x01, 0x44, 0x18, 0x10, 0x42, 0x81, 0x11, 0x04,
0x00, 0x27, 0x21, 0x80, 0xa4, 0x84, 0x65, 0x08,
0x4b, 0x81, 0x88, 0x29, 0x81, 0x18, 0xb4, 0x34,
0x08, 0xaf, 0xbd, 0x07, 0x21, 0x22, 0x41, 0x84,
0x30, 0x28, 0x00, 0x81, 0xa0, 0x12, 0x84, 0x29,
0x02, 0xa0, 0x38, 0x1e, 0x44, 0x91, 0x40, 0x24,
0x43, 0x08, 0x2c, 0x11, 0xc8, 0x28, 0x30, 0x28,
0x28, 0x81, 0x30, 0x18, 0x00, 0x22, 0x44, 0x23,
0x84, 0x82, 0x01, 0x43, 0x01, 0x12, 0x21, 0x00,
0x22, 0xa9, 0x02, 0x46, 0x04, 0x29, 0x63, 0x84,
0x82, 0x81, 0x20, 0x04, 0x8c, 0xf1, 0xd7, 0xb9,
0x80, 0x64, 0x25, 0x30, 0x24, 0x47, 0x84, 0x00,
0x84, 0x23, 0x81, 0x01, 0x39, 0x33, 0x11, 0x13,
0x72, 0x42, 0x18, 0x04, 0x42, 0x41, 0x2c, 0x44,
0x04, 0x38, 0x8d, 0x31, 0x40, 0xa1, 0x12, 0x10,
0x22, 0x04, 0x29, 0x22, 0x44, 0x54, 0x48, 0x11,
0x42, 0x51, 0x21, 0x40, 0x51, 0x84, 0x60, 0x82,
0x68, 0x1e, 0x88, 0x23, 0x84, 0xc2, 0x88, 0x12,
0x49, 0x58, 0x58, 0x00, 0x49, 0x31, 0x26, 0x28,
0xc0, 0x12, 0xff, 0x3c, 0x01, 0x00, 0x12, 0x00,
0x10, 0x88, 0x12, 0x11, 0x08, 0x10, 0x88, 0x24,
0x01, 0x25, 0x08, 0x58, 0x28, 0x84, 0x42, 0x40,
0x84, 0x42, 0x02, 0x40, 0x0c, 0x18, 0xc0, 0x12,
0x00, 0x20, 0x31, 0x12, 0x45, 0x48, 0x22, 0x82,
0x35, 0x22, 0x43, 0x01, 0x21, 0x18, 0x20, 0x05,
0x81, 0x14, 0xc0, 0x48, 0x42, 0x88, 0x00, 0x46,
0xf4, 0x3a, 0x82, 0x34, 0x48, 0x00, 0x8c, 0x62,
0xc4, 0x8c, 0x42, 0x18, 0x18, 0x6b, 0x81, 0x26,
0x32, 0x31, 0xb9, 0xc1, 0x41, 0xa9, 0xc4, 0xb4,
0x23, 0x71, 0x48, 0x21, 0xd1, 0x24, 0x66, 0xc3,
0x89, 0x24, 0x01, 0x21, 0x28, 0x23, 0x03, 0x2d,
0x2c, 0x81, 0x2c, 0x91, 0x28, 0x2c, 0x01, 0x41,
0x51, 0x12, 0x27, 0x61, 0x44, 0x24, 0x2e, 0x14,
0x4f, 0x81, 0xb4, 0x12, 0x22, 0x21, 0xd2, 0xa2,
0x21, 0x43, 0x38, 0x58, 0x43, 0x41, 0xc4, 0x18,
0x81, 0x83, 0xc8, 0x5c, 0x2b, 0x82, 0x48, 0x40,
0xf8, 0x85, 0x8b, 0x00, 0x1a, 0x92, 0x16, 0x44,
0x00, 0x80, 0x82, 0x01, 0x18, 0x84, 0x00, 0x18,
0x70, 0x48, 0x04, 0x46, 0x04,
nhwidth_est
def getEstStaffLineLocs(featmap, nhlocs, stavelens, colWidth, deltaRowMax, globalOffset = 0):
preds = []
if np.isscalar(globalOffset):
globalOffset = [globalOffset] * len(nhlocs)
for i, nhloc in enumerate(nhlocs):
r = int(np.round(nhloc[0]))
c = int(np.round(nhloc[1]))
rupper = min(r + deltaRowMax + 1 + globalOffset[i], featmap.shape[1])
rlower = max(r - deltaRowMax + globalOffset[i], 0)
featmapIdx = c // colWidth
regCurrent = np.squeeze(featmap[:, rlower:rupper, featmapIdx])
mapidx, roffset = np.unravel_index(regCurrent.argmax(), regCurrent.shape)
rstart = rlower + roffset
rend = rstart + stavelens[mapidx] - 1
preds.append((rstart, rend, c, r, mapidx))
sfiltlen = int(np.round(np.median([stavelens[tup[4]] for tup in preds])))
return preds, sfiltlen
def visualizeEstStaffLines(preds, arr):
showGrayscaleImage(arr, (15,15))
rows1 = np.array([pred[0] for pred in preds]) # top staff line
rows2 = np.array([pred[1] for pred in preds]) # bottom staff line
cols = np.array([pred[2] for pred in preds]) # nh col
rows3 = np.array([pred[3] for pred in preds]) # nh row
plt.scatter(cols, rows1, c = 'r', s = 3)
plt.scatter(cols, rows2, c = 'b', s = 3)
plt.scatter(cols, rows3, c = 'y', s = 3)
def estimateStaffMidpoints(preds, clustersMin, clustersMax, threshold):
r = np.array([.5*(tup[0] + tup[1]) for tup in preds]) # midpts of estimated stave locations
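# Model selection: add clusters until the two closest centroids fall within
# `threshold` of each other, then fall back to the last well-separated clustering.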
models = []
for numClusters in range(clustersMin, clustersMax + 1):
kmeans = KMeans(n_clusters=numClusters, n_init=1, random_state = 0).fit(r.reshape(-1,1))
sorted_list = np.array(sorted(np.squeeze(kmeans.cluster_centers_)))
mindiff = np.min(sorted_list[1:] - sorted_list[0:-1])
if numClusters > clustersMin and mindiff < threshold:
break
models.append(kmeans)
staffMidpts = np.sort(np.squeeze(models[-1].cluster_centers_))
return staffMidpts
def debugStaffMidpointClustering(preds):
r = np.array([.5*(tup[0] + tup[1]) for tup in preds]) # midpts of estimated stave locations
inertias = []
mindiffs = []
clusterRange = np.arange(2,12)
for numClusters in clusterRange:
kmeans = KMeans(n_clusters=numClusters, n_init=1, random_state = 0).fit(r.reshape(-1,1))
inertias.append(kmeans.inertia_)
sorted_list = np.array(sorted(np.squeeze(kmeans.cluster_centers_)))
diffs = sorted_list[1:] - sorted_list[0:-1]
mindiffs.append(np.min(diffs))
plt.subplot(211)
plt.plot(clusterRange, np.log(inertias))
plt.xlabel('Number of Clusters')
plt.ylabel('Inertia')
plt.subplot(212)
plt.plot(clusterRange, mindiffs)
plt.xlabel('Number of Clusters')
plt.ylabel('Min Centroid Separation')
plt.axhline(60, color='r')
def visualizeStaffMidpointClustering(preds, centers):
r = np.array([.5*(tup[0] + tup[1]) for tup in preds]) # midpts of estimated stave locations
plt.plot(r, np.random.uniform(size = len(r)), '.')
for center in centers:
plt.axvline(x=center, color='r')
def assignNoteheadsToStaves(nhlocs, staveCenters):
nhrows = np.matlib.repmat([tup[0] for tup in nhlocs], len(staveCenters), 1)
centers = np.matlib.repmat(staveCenters.reshape((-1,1)), 1, len(nhlocs))
staveIdxs = np.argmin(np.abs(nhrows - centers), axis=0)
offsets = staveCenters[staveIdxs] - nhrows[0,:] # row offset between note and staff midpoint
return staveIdxs, offsets
def visualizeClusters(arr, nhlocs, clusters):
showGrayscaleImage(arr)
rows = np.array([tup[0] for tup in nhlocs])
cols = np.array([tup[1] for tup in nhlocs])
plt.scatter(cols, rows, c=clusters)
for i in range(len(clusters)):
plt.text(cols[i], rows[i] - 15, str(clusters[i]), fontsize = 12, color='red')
def estimateNoteLabels(preds):
nhvals = [] # estimated note labels
for i, (rstart, rend, c, r, filtidx) in enumerate(preds):
# if a stave has height L, there are 8 stave locations in (L-1) pixel rows
staveMidpt = .5 * (rstart + rend)
noteStaveLoc = -1.0 * (r - staveMidpt) * 8 / (rend - rstart)
nhval = int(np.round(noteStaveLoc))
nhvals.append(nhval)
return nhvals
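# Worked example (assumed numbers): for a stave spanning rows 100-140
# (rend - rstart = 40) and a notehead at r = 115, staveMidpt = 120, so
# noteStaveLoc = -(115 - 120) * 8 / 40 = 1, i.e. one staff position
# (half a line spacing) above the middle staff line.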
def visualizeNoteLabels(arr, vals, locs):
showGrayscaleImage(arr)
rows = np.array([loc[0] for loc in locs])
cols = np.array([loc[1] for loc in locs])
plt.scatter(cols, rows, color='blue')
for i in range(len(rows)):
plt.text(cols[i], rows[i] - 15, str(vals[i]), fontsize = 12, color='red')
def isolateBarlines(im, morphFilterVertLineLength, morphFilterVertLineWidth, maxBarlineWidth):
hkernel = np.ones((1, morphFilterVertLineWidth), np.uint8) # dilate first to catch warped barlines
vlines = cv2.dilate(im, hkernel, iterations = 1)
vlines = morphFilterRectangle(vlines, morphFilterVertLineLength, 1) # then filter for tall vertical lines
nonbarlines = morphFilterRectangle(vlines, 1, maxBarlineWidth)
vlines = np.clip(vlines - nonbarlines, 0, 1)
return vlines
def determineStaveGrouping(staveMidpts, vlines):
N = len(staveMidpts)
rowSums = np.sum(vlines, axis=1)
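# Barlines presumably span both staves of a grand staff, so the pairing whose
# inter-stave rows accumulate the larger median vertical-line row sum is taken
# as the true right-hand/left-hand grouping.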
# grouping A: 0-1, 2-3, 4-5, ...
elems_A = []
map_A = {}
for i, staveIdx in enumerate(np.arange(0, N, 2)):
if staveIdx+1 < N:
startRow = int(staveMidpts[staveIdx])
endRow = int(staveMidpts[staveIdx+1]) + 1
elems_A.extend(rowSums[startRow:endRow])
map_A[staveIdx] = staveIdx
map_A[staveIdx+1] = staveIdx + 1
else:
map_A[staveIdx] = -1 # unpaired stave
# grouping B: 1-2, 3-4, 5-6, ...
elems_B = []
map_B = {}
map_B[0] = -1
for i, staveIdx in enumerate(np.arange(1, N, 2)):
if staveIdx+1 < N:
startRow = int(staveMidpts[staveIdx])
endRow = int(staveMidpts[staveIdx+1]) + 1
elems_B.extend(rowSums[startRow:endRow])
map_B[staveIdx] = staveIdx - 1
map_B[staveIdx + 1] = staveIdx
else:
map_B[staveIdx] = -1
if N > 2:
evidence_A = np.median(elems_A)
evidence_B = np.median(elems_B)
if evidence_A > evidence_B:
mapping = map_A
else:
mapping = map_B
else:
evidence_A = np.median(elems_A)
evidence_B = 0
mapping = map_A
return mapping, (evidence_A, evidence_B, elems_A, elems_B)
def debugStaveGrouping(vlines, staveCenters):
plt.plot(np.sum(vlines, axis=1))
for m in staveCenters:
plt.axvline(m, color = 'r')
def clusterNoteheads(staveIdxs, mapping):
clusterIdxs = [mapping[staveIdx] for staveIdx in staveIdxs]
maxClusterIdx = np.max(np.array(clusterIdxs))
clusterPairs = []
for i in range(0, maxClusterIdx, 2):
clusterPairs.append((i,i+1))
return clusterIdxs, clusterPairs
def generateSingleBootlegLine(nhdata, clusterR, clusterL, minColDiff, repeatNotes = 1, filler = 1):
notes = [tup for tup in nhdata if tup[3] == clusterR or tup[3] == clusterL]
notes = sorted(notes, key = lambda tup: (tup[1], tup[0])) # sort by column, then row
collapsed = collapseSimultaneousEvents(notes, minColDiff) # list of (rows, cols, vals, clusters)
bscore, eventIndices, staffLinesBoth, _, _ = constructBootlegScore(collapsed, clusterR, clusterL, repeatNotes, filler)
return bscore, collapsed, eventIndices, staffLinesBoth
def collapseSimultaneousEvents(notes, minColDiff):
assigned = np.zeros(len(notes), dtype=bool)
events = [] # list of simultaneous note events
for i, (row, col, val, cluster) in enumerate(notes):
if assigned[i]: # has already been assigned
continue
rows = [row] # new event
cols = [col]
vals = [val]
clusters = [cluster]
assigned[i] = True
for j in range(i+1, len(notes)):
nrow, ncol, nval, ncluster = notes[j]
if ncol - col < minColDiff: # assign to same event if close
rows.append(nrow)
cols.append(ncol)
vals.append(nval)
clusters.append(ncluster)
assigned[j] = True
else:
break
events.append((rows, cols, vals, clusters))
assert(np.all(assigned))
return events
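# Illustrative call: with column-sorted notes [(10, 50, 0, 0), (20, 52, 2, 1),
# (15, 80, 4, 0)] and minColDiff = 10, the notes at columns 50 and 52 collapse
# into one event and the note at column 80 becomes its own event.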
def constructBootlegScore(noteEvents, clusterIndexRH, clusterIndexLH, repeatNotes = 1, filler = 1):
# note that this has to match generateBootlegScore() in the previous notebook!
rh_dim = 34 # E3 to C8 (inclusive)
lh_dim = 28 # A1 to G4 (inclusive)
rh = [] # list of arrays of size rh_dim
lh = [] # list of arrays of size lh_dim
eventIndices = [] # index of corresponding simultaneous note event
for i, (rows, cols, vals, clusters) in enumerate(noteEvents):
# insert empty filler columns between note events
if i > 0:
for j in range(filler):
rh.append(np.zeros((rh_dim,1)))
lh.append(np.zeros((lh_dim,1)))
eventIndices.append(i-1) # assign filler to previous event
# insert note events columns
rhvec, lhvec = getNoteheadPlacement(vals, clusters, rh_dim, lh_dim, clusterIndexRH, clusterIndexLH)
for j in range(repeatNotes):
rh.append(rhvec)
lh.append(lhvec)
eventIndices.append(i)
rh = np.squeeze(np.array(rh)).reshape((-1, rh_dim)).T # reshape handles case when len(rh) == 1
lh = np.squeeze(np.array(lh)).reshape((-1, lh_dim)).T
both = np.vstack((lh, rh))
staffLinesRH = [7,9,11,13,15]
staffLinesLH = [13,15,17,19,21]
staffLinesBoth = [13,15,17,19,21,35,37,39,41,43]
return both, eventIndices, staffLinesBoth, (rh, staffLinesRH), (lh, staffLinesLH)
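# In the stacked output, right-hand staff lines sit at staffLinesRH + lh_dim
# (7 + 28 = 35, ..., 15 + 28 = 43), which yields the upper five entries of
# staffLinesBoth.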
def getNoteheadPlacement(vals, clusters, rdim, ldim, clusterRH, clusterLH):
rhvec = np.zeros((rdim, 1))
lhvec = np.zeros((ldim, 1))
assert(clusterLH == clusterRH + 1)
for (val, cluster) in zip(vals, clusters):
if cluster == clusterRH:
idx = val + 11
if idx >= 0 and idx < rdim:
rhvec[idx, 0] = 1
elif cluster == clusterLH:
idx = val + 17
if idx >= 0 and idx < ldim:
lhvec[idx, 0] = 1
else:
print("Invalid cluster: {} (LH {}, RH {})".format(cluster, clusterLH, clusterRH))
sys.exit(1)
return rhvec, lhvec
def visualizeBootlegScore(bs, lines):
plt.figure(figsize = (10,10))
plt.imshow(1 - bs, cmap = 'gray', origin = 'lower')
for l in range(1, bs.shape[0], 2):
plt.axhline(l, c = 'grey')
for l in lines:
plt.axhline(l, c = 'r')
def generateImageBootlegScore(nhdata, pairings, repeatNotes = 1, filler = 1, minColDiff = 10):
allScores = []
allEvents = []
globIndices = []
eventCount = 0
if len(pairings) == 0:
return None, None, None, None
for i, (clusterR, clusterL) in enumerate(pairings):
score, events, eventIndices, staffLinesBoth = generateSingleBootlegLine(nhdata, clusterR, clusterL, minColDiff, repeatNotes, filler)
allScores.append(score)
allEvents.extend(events)
globIndices.extend([idx + eventCount for idx in eventIndices])
if filler > 0 and i < len(pairings) - 1:
allScores.append(np.zeros((score.shape[0], filler))) # append filler columns between bootleg scores
globIndices.extend([globIndices[-1]] * filler) # map filler columns to last event index
eventCount += len(events)
panorama = np.hstack(allScores)
return panorama, allEvents, globIndices, staffLinesBoth
def visualizeLongBootlegScore(bs, lines, chunksz = 150):
chunks = bs.shape[1] // chunksz + 1
for i in range(chunks):
startcol = i * chunksz
endcol = min((i + 1)*chunksz, bs.shape[1])
visualizeBootlegScore(bs[:,startcol:endcol], lines)
def processImageFile(imagefile, outfile):
### system parameters ###
'618660707':{'en': 'Koorda'},
'618660708':{'en': 'Lancelin'},
'618660709':{'en': 'Meckering'},
'618660710':{'en': 'Miling'},
'618660711':{'en': 'Moora'},
'618660712':{'en': 'Northam'},
'618660713':{'en': 'Pantapin'},
'618660714':{'en': '<NAME>'},
'618660715':{'en': 'Quairading'},
'618660716':{'en': 'Regans Ford'},
'618660717':{'en': 'South Quairading'},
'618660718':{'en': 'Studleigh'},
'618660719':{'en': '<NAME>'},
'618660720':{'en': 'Tammin'},
'618660721':{'en': 'Trayning'},
'618660722':{'en': 'Wannamal'},
'618660723':{'en': 'Watheroo'},
'618660724':{'en': '<NAME>'},
'618660725':{'en': 'Wubin'},
'618660726':{'en': 'Wubin West'},
'618660727':{'en': 'Wyalkatchem'},
'618660728':{'en': 'Yelbeni'},
'618660729':{'en': 'Yerecoin'},
'618660730':{'en': 'York'},
'618660731':{'en': 'Yorkrakine'},
'618660732':{'en': 'Aldersyde'},
'618660733':{'en': '<NAME>'},
'618660734':{'en': 'Badgingarra'},
'618660735':{'en': 'Balkuling'},
'618660736':{'en': 'Ballidu'},
'618660737':{'en': 'Beacon'},
'618660738':{'en': 'Beacon North'},
'618660739':{'en': 'Bencubbin'},
'618660740':{'en': 'Beverley'},
'618660741':{'en': 'Beverley West'},
'618660742':{'en': 'Bibby Springs'},
'618660743':{'en': 'Bidaminna'},
'618660744':{'en': 'Bolgart'},
'618660745':{'en': 'Brookton'},
'618660746':{'en': 'Burakin'},
'618660747':{'en': 'Cadoux'},
'618660748':{'en': 'Calingiri'},
'618660749':{'en': 'Cleary North'},
'618660750':{'en': 'Coomallo'},
'618660751':{'en': 'Coomberdale'},
'618660752':{'en': 'Cunderdin'},
'618660753':{'en': 'Cunderdin North'},
'618660754':{'en': 'Dale River'},
'618660755':{'en': 'Dalwallinu'},
'618660756':{'en': 'Dalwallinu West'},
'618660757':{'en': 'Dandaragan'},
'618660758':{'en': 'Dangin'},
'618660759':{'en': 'Dowerin'},
'618660760':{'en': 'Dukin'},
'618660761':{'en': 'Ejanding'},
'618660762':{'en': 'Gabbin'},
'618660763':{'en': 'Gabbin North'},
'618660764':{'en': 'Gillingarra'},
'618660765':{'en': 'Goodlands'},
'618660766':{'en': 'Goomalling'},
'618660767':{'en': 'Jelkobine'},
'618660768':{'en': 'Jennacubbine'},
'618660769':{'en': 'Jurien'},
'618660770':{'en': 'Kalannie'},
'618660771':{'en': '<NAME>'},
'618660772':{'en': 'Konnongorring'},
'618660773':{'en': 'Koorda'},
'618660774':{'en': 'Lancelin'},
'618660775':{'en': 'Meckering'},
'618660776':{'en': 'Miling'},
'618660777':{'en': 'Moora'},
'618660778':{'en': 'Northam'},
'618660779':{'en': 'Pantapin'},
'618660780':{'en': 'Quairading'},
'618660781':{'en': '<NAME>'},
'618660782':{'en': '<NAME>'},
'618660783':{'en': 'Studleigh'},
'618660784':{'en': '<NAME>'},
'618660785':{'en': 'Tammin'},
'618660786':{'en': 'Trayning'},
'618660787':{'en': 'Wannamal'},
'618660788':{'en': 'Watheroo'},
'618660789':{'en': '<NAME>'},
'618660790':{'en': 'Wubin'},
'618660791':{'en': 'Wubin West'},
'618660792':{'en': 'Wyalkatchem'},
'618660793':{'en': 'Yelbeni'},
'618660794':{'en': 'Yerecoin'},
'618660795':{'en': 'York'},
'618660796':{'en': 'Yorkrakine'},
'618660797':{'en': 'Aldersyde'},
'618660798':{'en': '<NAME>'},
'618660799':{'en': 'Badgingarra'},
'61866080':{'en': 'Northam'},
'61866081':{'en': 'Jurien'},
'61866082':{'en': 'Lancelin'},
'618660830':{'en': 'Balkuling'},
'618660831':{'en': 'Ballidu'},
'618660832':{'en': 'Beacon'},
'618660833':{'en': 'Beacon North'},
'618660834':{'en': 'Bencubbin'},
'618660835':{'en': 'Beverley'},
'618660836':{'en': '<NAME>'},
'618660837':{'en': '<NAME>'},
'618660838':{'en': 'Bidaminna'},
'618660839':{'en': 'Bolgart'},
'618660840':{'en': 'Brookton'},
'618660841':{'en': 'Burakin'},
'618660842':{'en': 'Cadoux'},
'618660843':{'en': 'Calingiri'},
'618660844':{'en': 'Cleary North'},
'618660845':{'en': 'Coomallo'},
'618660846':{'en': 'Coomberdale'},
'618660847':{'en': 'Cunderdin'},
'618660848':{'en': 'Cunderdin North'},
'618660849':{'en': 'Dale River'},
'618660850':{'en': 'Dalwallinu'},
'618660851':{'en': 'Dalwallinu West'},
'618660852':{'en': 'Dandaragan'},
'618660853':{'en': 'Dangin'},
'618660854':{'en': 'Dowerin'},
'618660855':{'en': 'Dukin'},
'618660856':{'en': 'Ejanding'},
'618660857':{'en': 'Gabbin'},
'618660858':{'en': 'Gabbin North'},
'618660859':{'en': 'Gillingarra'},
'618660860':{'en': 'Goodlands'},
'618660861':{'en': 'Goomalling'},
'618660862':{'en': 'Jelkobine'},
'618660863':{'en': 'Jennacubbine'},
'618660864':{'en': 'Jurien'},
'618660865':{'en': 'Kalannie'},
'618660866':{'en': 'Kalannie East'},
'618660867':{'en': 'Konnongorring'},
'618660868':{'en': 'Koorda'},
'618660869':{'en': 'Lancelin'},
'618660870':{'en': 'Meckering'},
'618660871':{'en': 'Miling'},
'618660872':{'en': 'Moora'},
'618660873':{'en': 'Aldersyde'},
'618660874':{'en': '<NAME>'},
'618660875':{'en': 'Badgingarra'},
'618660876':{'en': 'Balkuling'},
'618660877':{'en': 'Ballidu'},
'618660878':{'en': 'Beacon'},
'618660879':{'en': 'Beacon North'},
'618660880':{'en': 'Bencubbin'},
'618660881':{'en': 'Beverley'},
'618660882':{'en': 'Beverley West'},
'618660883':{'en': 'Bibby Springs'},
'618660884':{'en': 'Bidaminna'},
'618660885':{'en': 'Bolgart'},
'618660886':{'en': 'Brookton'},
'618660887':{'en': 'Burakin'},
'618660888':{'en': 'Cadoux'},
'618660889':{'en': 'Calingiri'},
'618660890':{'en': 'Cleary North'},
'618660891':{'en': 'Coomallo'},
'618660892':{'en': 'Coomberdale'},
'618660893':{'en': 'Cunderdin'},
'618660894':{'en': 'Cunderdin North'},
'618660895':{'en': 'Dale River'},
'618660896':{'en': 'Dalwallinu'},
'618660897':{'en': 'Dalwallinu West'},
'618660898':{'en': 'Dandaragan'},
'618660899':{'en': 'Dangin'},
'618660900':{'en': 'Dowerin'},
'618660901':{'en': 'Dukin'},
'618660902':{'en': 'Ejanding'},
'618660903':{'en': 'Gabbin'},
'618660904':{'en': 'Gabbin North'},
'618660905':{'en': 'Gillingarra'},
'618660906':{'en': 'Goodlands'},
'618660907':{'en': 'Goomalling'},
'618660908':{'en': 'Jelkobine'},
'618660909':{'en': 'Jennacubbine'},
'618660910':{'en': 'Jurien'},
'618660911':{'en': 'Kalannie'},
'618660912':{'en': 'Kalannie East'},
'618660913':{'en': 'Konnongorring'},
'618660914':{'en': 'Koorda'},
'618660915':{'en': 'Lancelin'},
'618660916':{'en': 'Meckering'},
'618660917':{'en': 'Miling'},
'618660918':{'en': 'Moora'},
'618660919':{'en': 'Northam'},
'618660920':{'en': 'Pantapin'},
'618660921':{'en': 'Quairading'},
'618660922':{'en': 'Regans Ford'},
'618660923':{'en': 'South Quairading'},
'618660924':{'en': 'Studleigh'},
'618660925':{'en': 'Talbot Brook'},
'618660926':{'en': 'Tammin'},
'618660927':{'en': 'Trayning'},
'618660928':{'en': 'Wannamal'},
'618660929':{'en': 'Watheroo'},
'618660930':{'en': 'Wongan Hills'},
'618660931':{'en': 'Wubin'},
'618660932':{'en': 'Wubin West'},
'618660933':{'en': 'Wyalkatchem'},
'618660934':{'en': 'Yelbeni'},
'618660935':{'en': 'Yerecoin'},
'618660936':{'en': 'York'},
'618660937':{'en': 'Yorkrakine'},
'618660938':{'en': 'Northam'},
'618660939':{'en': 'Pantapin'},
'618660940':{'en': 'Quairading'},
'618660941':{'en': 'Regans Ford'},
'618660942':{'en': 'South Quairading'},
'618660943':{'en': 'Studleigh'},
'618660944':{'en': 'Talbot Brook'},
'618660945':{'en': 'Tammin'},
'618660946':{'en': 'Trayning'},
'618660947':{'en': 'Wannamal'},
'618660948':{'en': 'Watheroo'},
'618660949':{'en': 'Wongan Hills'},
'618660950':{'en': 'Wubin'},
'618660951':{'en': 'Wubin West'},
'618660952':{'en': 'Wyalkatchem'},
'618660953':{'en': 'Yelbeni'},
'618660954':{'en': 'Yerecoin'},
'618660955':{'en': 'York'},
'618660956':{'en': 'Yorkrakine'},
'618660957':{'en': 'Aldersyde'},
'618660958':{'en': '<NAME>'},
'618660959':{'en': 'Badgingarra'},
'618660960':{'en': 'Balkuling'},
'618660961':{'en': 'Ballidu'},
'618660962':{'en': 'Beacon'},
'618660963':{'en': 'Beacon North'},
'618660964':{'en': 'Bencubbin'},
'618660965':{'en': 'Beverley'},
'618660966':{'en': 'Beverley West'},
'618660967':{'en': 'Bibby Springs'},
'618660968':{'en': 'Bidaminna'},
'618660969':{'en': 'Bolgart'},
'618660970':{'en': 'Brookton'},
'618660971':{'en': 'Burakin'},
'618660972':{'en': 'Cadoux'},
'618660973':{'en': 'Calingiri'},
'618660974':{'en': 'Cleary North'},
'618660975':{'en': 'Coomallo'},
'618660976':{'en': 'Coomberdale'},
'618660977':{'en': 'Cunderdin'},
'618660978':{'en': 'Cunderdin North'},
'618660979':{'en': 'Dale River'},
'618660980':{'en': 'Dalwallinu'},
'618660981':{'en': 'Dalwallinu West'},
'618660982':{'en': 'Dandaragan'},
'618660983':{'en': 'Dangin'},
'618660984':{'en': 'Dowerin'},
'618660985':{'en': 'Dukin'},
'618660986':{'en': 'Ejanding'},
'618660987':{'en': 'Gabbin'},
'618660988':{'en': 'Gabbin North'},
'618660989':{'en': 'Gillingarra'},
'618660990':{'en': 'Goodlands'},
'618660991':{'en': 'Goomalling'},
'618660992':{'en': 'Jelkobine'},
'618660993':{'en': 'Jennacubbine'},
'618660994':{'en': 'Jurien'},
'618660995':{'en': 'Kalannie'},
'618660996':{'en': 'Kalannie East'},
'618660997':{'en': 'Konnongorring'},
'618660998':{'en': 'Koorda'},
'618660999':{'en': 'Lancelin'},
'618661000':{'en': 'Meckering'},
'618661001':{'en': 'Miling'},
'618661002':{'en': 'Moora'},
'618661003':{'en': 'Northam'},
'618661004':{'en': 'Pantapin'},
'618661005':{'en': 'Quairading'},
'618661006':{'en': '<NAME>'},
'618661007':{'en': 'South Quairading'},
'618661008':{'en': 'Studleigh'},
'618661009':{'en': '<NAME>'},
'618661010':{'en': 'Tammin'},
'618661011':{'en': 'Trayning'},
'618661012':{'en': 'Wannamal'},
'618661013':{'en': 'Watheroo'},
'618661014':{'en': '<NAME>'},
'618661015':{'en': 'Wubin'},
'618661016':{'en': 'Wubin West'},
'618661017':{'en': 'Wyalkatchem'},
'618661018':{'en': 'Yelbeni'},
'618661019':{'en': 'Yerecoin'},
'618661020':{'en': 'York'},
'618661021':{'en': 'Yorkrakine'},
'618661022':{'en': 'Aldersyde'},
'618661023':{'en': '<NAME>'},
'618661024':{'en': 'Badgingarra'},
'618661025':{'en': 'Balkuling'},
'618661026':{'en': 'Ballidu'},
'618661027':{'en': 'Beacon'},
'618661028':{'en': 'Beacon North'},
'618661029':{'en': 'Bencubbin'},
'618661030':{'en': 'Beverley'},
'618661031':{'en': 'Beverley West'},
'618661032':{'en': 'Bibby Springs'},
'618661033':{'en': 'Bidaminna'},
'618661034':{'en': 'Bolgart'},
'618661035':{'en': 'Brookton'},
'618661036':{'en': 'Burakin'},
'618661037':{'en': 'Cadoux'},
'618661038':{'en': 'Calingiri'},
'618661039':{'en': 'Cleary North'},
'618661040':{'en': 'Coomallo'},
'618661041':{'en': 'Coomberdale'},
'618661042':{'en': 'Cunderdin'},
'618661043':{'en': 'Cunderdin North'},
'618661044':{'en': 'Dale River'},
'618661045':{'en': 'Dalwallinu'},
'618661046':{'en': 'Dalwallinu West'},
'618661047':{'en': 'Dandaragan'},
'618661048':{'en': 'Dangin'},
'618661049':{'en': 'Dowerin'},
'618661050':{'en': 'Dukin'},
'618661051':{'en': 'Ejanding'},
'618661052':{'en': 'Gabbin'},
'618661053':{'en': 'Gabbin North'},
'618661054':{'en': 'Gillingarra'},
'618661055':{'en': 'Goodlands'},
'618661056':{'en': 'Goomalling'},
'618661057':{'en': 'Jelkobine'},
'618661058':{'en': 'Jennacubbine'},
'618661059':{'en': 'Jurien'},
'618661060':{'en': 'Kalannie'},
'618661061':{'en': 'Kalannie East'},
'618661062':{'en': 'Konnongorring'},
'618661063':{'en': 'Koorda'},
'618661064':{'en': 'Lancelin'},
'618661065':{'en': 'Meckering'},
'618661066':{'en': 'Miling'},
'618661067':{'en': 'Moora'},
'618661068':{'en': 'Northam'},
'618661069':{'en': 'Pantapin'},
'618661070':{'en': 'Quairading'},
'618661071':{'en': '<NAME>'},
'618661072':{'en': '<NAME>'},
'618661073':{'en': 'Studleigh'},
'618661074':{'en': '<NAME>'},
'618661075':{'en': 'Tammin'},
'618661076':{'en': 'Trayning'},
'618661077':{'en': 'Wannamal'},
'618661078':{'en': 'Watheroo'},
'618661079':{'en': '<NAME>'},
'618661080':{'en': 'Wubin'},
'618661081':{'en': '<NAME>'},
'618661082':{'en': 'Wyalkatchem'},
'618661083':{'en': 'Yelbeni'},
'618661084':{'en': 'Yerecoin'},
'618661085':{'en': 'York'},
'618661086':{'en': 'Yorkrakine'},
'618661087':{'en': 'Aldersyde'},
'618661088':{'en': '<NAME>'},
'618661089':{'en': 'Badgingarra'},
'61866109':{'en': 'Cunderdin'},
'61866110':{'en': '<NAME>'},
'618661110':{'en': 'Balkuling'},
'618661111':{'en': 'Ballidu'},
'618661112':{'en': 'Beacon'},
'618661113':{'en': 'Beacon North'},
'618661114':{'en': 'Bencubbin'},
'618661115':{'en': 'Beverley'},
'618661116':{'en': 'Beverley West'},
'618661117':{'en': '<NAME>'},
'618661118':{'en': 'Bidaminna'},
'618661119':{'en': 'Bolgart'},
'618661120':{'en': 'Brookton'},
'618661121':{'en': 'Burakin'},
'618661122':{'en': 'Cadoux'},
'618661123':{'en': 'Calingiri'},
'618661124':{'en': 'Cleary North'},
'618661125':{'en': 'Coomallo'},
'618661126':{'en': 'Coomberdale'},
'618661127':{'en': 'Cunderdin'},
'618661128':{'en': 'Cunderdin North'},
'618661129':{'en': 'Dale River'},
'618661130':{'en': 'Dalwallinu'},
'618661131':{'en': 'Dalwallinu West'},
'618661132':{'en': 'Dandaragan'},
'618661133':{'en': 'Dangin'},
'618661134':{'en': 'Dowerin'},
'618661135':{'en': 'Dukin'},
'618661136':{'en': 'Ejanding'},
'618661137':{'en': 'Gabbin'},
'618661138':{'en': 'Gabbin North'},
'618661139':{'en': 'Gillingarra'},
'618661140':{'en': 'Goodlands'},
'618661141':{'en': 'Goomalling'},
'618661142':{'en': 'Jelkobine'},
'618661143':{'en': 'Jennacubbine'},
'618661144':{'en': 'Jurien'},
'618661145':{'en': 'Kalannie'},
'618661146':{'en': 'Kalannie East'},
'618661147':{'en': 'Konnongorring'},
'618661148':{'en': 'Koorda'},
'618661149':{'en': 'Lancelin'},
'618661150':{'en': 'Meckering'},
'618661151':{'en': 'Miling'},
'618661152':{'en': 'Moora'},
'618661153':{'en': 'Northam'},
'618661154':{'en': 'Pantapin'},
'618661155':{'en': '<NAME>'},
'618661156':{'en': 'Quairading'},
'618661157':{'en': 'Regans Ford'},
'618661158':{'en': 'South Quairading'},
'618661159':{'en': 'Studleigh'},
'618661160':{'en': '<NAME>'},
'618661161':{'en': 'Tammin'},
'618661162':{'en': 'Trayning'},
'618661163':{'en': 'Wannamal'},
'618661164':{'en': 'Watheroo'},
'618661165':{'en': 'Wongan Hills'},
'618661166':{'en': 'Wubin'},
'618661167':{'en': 'Wubin West'},
'618661168':{'en': 'Wyalkatchem'},
'618661169':{'en': 'Yelbeni'},
'618661170':{'en': 'Yerecoin'},
'618661171':{'en': 'York'},
'618661172':{'en': 'Yorkrakine'},
'61866118':{'en': 'Bibby Springs'},
'61866119':{'en': 'Balkuling'},
'61866130':{'en': 'Coomallo'},
'61866149':{'en': 'York'},
'61866611':{'en': 'Dalwallinu'},
'61866612':{'en': 'Northam'},
'61866613':{'en': 'Beverley'},
'61866614':{'en': 'Beverley'},
'61866615':{'en': 'Beverley'},
'61866616':{'en': 'Beverley West'},
'61866617':{'en': 'Beverley West'},
'61866618':{'en': 'Beverley West'},
'61867000':{'en': 'Augusta'},
'61867001':{'en': 'Balingup'},
'61867002':{'en': 'Beedelup'},
'61867003':{'en': 'Boyup Brook'},
'61867004':{'en': 'Bridgetown'},
'61867005':{'en': '<NAME>'},
'61867006':{'en': 'Bunbury'},
'61867007':{'en': 'Busselton'},
'61867008':{'en': 'Capel'},
'61867009':{'en': 'Collie'},
'61867010':{'en': 'Cundinup'},
'61867011':{'en': 'Dardanup'},
'61867012':{'en': 'Darkan'},
'61867013':{'en': 'Dinninup'},
'61867014':{'en': '<NAME>'},
'61867015':{'en': 'Donnybrook'},
'61867016':{'en': 'Harvey'},
'61867017':{'en': 'Jangardup'},
'61867018':{'en': '<NAME>'},
'61867019':{'en': 'Manjimup'},
'61867020':{'en': '<NAME>'},
'61867021':{'en': 'Marybrook'},
'61867022':{'en': 'Myalup'},
'61867023':{'en': 'Nannup'},
'61867024':{'en': 'Nyamup'},
'61867025':{'en': 'Pemberton'},
'61867026':{'en': 'Tonebridge'},
'61867027':{'en': '<NAME>'},
'61867028':{'en': 'Waroona'},
'61867029':{'en': 'Wilga'},
'61867030':{'en': 'Augusta'},
'61867031':{'en': 'Balingup'},
'61867032':{'en': 'Beedelup'},
'61867033':{'en': '<NAME>'},
'61867034':{'en': 'Bridgetown'},
'61867035':{'en': '<NAME>'},
'61867036':{'en': 'Bunbury'},
'61867037':{'en': 'Busselton'},
'61867038':{'en': 'Capel'},
'61867039':{'en': 'Collie'},
'61867040':{'en': 'Cundinup'},
'61867041':{'en': 'Dardanup'},
'61867042':{'en': 'Darkan'},
'61867043':{'en': 'Dinninup'},
'61867044':{'en': '<NAME>'},
'61867045':{'en': 'Donnybrook'},
'61867046':{'en': 'Harvey'},
'61867047':{'en': 'Jangardup'},
'61867048':{'en': '<NAME>'},
'61867049':{'en': 'Manjimup'},
'61867050':{'en': '<NAME>'},
'61867051':{'en': 'Marybrook'},
'61867052':{'en': 'Myalup'},
'61867053':{'en': 'Nannup'},
'61867054':{'en': 'Nyamup'},
'61867055':{'en': 'Pemberton'},
'61867056':{'en': 'Tonebridge'},
'61867057':{'en': '<NAME>'},
'61867058':{'en': 'Waroona'},
'61867059':{'en': 'Wilga'},
'61867060':{'en': 'Augusta'},
'61867061':{'en': 'Balingup'},
'61867062':{'en': 'Beedelup'},
'61867063':{'en': '<NAME>'},
'61867064':{'en': 'Bridgetown'},
'61867065':{'en': '<NAME>'},
'61867066':{'en': 'Bunbury'},
'61867067':{'en': 'Busselton'},
'61867068':{'en': 'Capel'},
'61867069':{'en': 'Collie'},
'61867070':{'en': 'Cundinup'},
'61867071':{'en': 'Dardanup'},
'61867072':{'en': 'Darkan'},
'61867073':{'en': 'Dinninup'},
'61867074':{'en': '<NAME>'},
'61867075':{'en': 'Donnybrook'},
'61867076':{'en': 'Harvey'},
'61867077':{'en': 'Jangardup'},
'61867078':{'en': '<NAME>'},
'61867079':{'en': 'Manjimup'},
'61867080':{'en': '<NAME>'},
'61867081':{'en': 'Marybrook'},
'61867082':{'en': 'Myalup'},
'61867083':{'en': 'Nannup'},
'61867084':{'en': 'Nyamup'},
'61867085':{'en': 'Pemberton'},
'61867086':{'en': 'Tonebridge'},
'61867087':{'en': '<NAME>'},
'61867088':{'en': 'Waroona'},
'61867089':{'en': 'Wilga'},
'61867090':{'en': 'Augusta'},
'61867091':{'en': 'Balingup'},
'61867092':{'en': 'Beedelup'},
'61867093':{'en': '<NAME>'},
'61867094':{'en': 'Bridgetown'},
'61867095':{'en': '<NAME>'},
'61867096':{'en': 'Bunbury'},
'61867097':{'en': 'Busselton'},
'61867098':{'en': 'Capel'},
'61867099':{'en': 'Collie'},
'61867100':{'en': 'Cundinup'},
'61867101':{'en': 'Dardanup'},
'61867102':{'en': 'Darkan'},
'61867103':{'en': 'Dinninup'},
'61867104':{'en': '<NAME>'},
'61867105':{'en': 'Donnybrook'},
'61867106':{'en': 'Harvey'},
'61867107':{'en': 'Jangardup'},
'61867108':{'en': '<NAME>'},
'61867109':{'en': 'Manjimup'},
'61867110':{'en': '<NAME>'},
'61867111':{'en': 'Marybrook'},
'61867112':{'en': 'Myalup'},
'61867113':{'en': 'Nannup'},
'61867114':{'en': 'Nyamup'},
'61867115':{'en': 'Pemberton'},
'61867116':{'en': 'Tonebridge'},
'61867117':{'en': '<NAME>'},
'61867118':{'en': 'Waroona'},
'61867119':{'en': 'Wilga'},
'61867120':{'en': 'Augusta'},
'61867121':{'en': 'Balingup'},
'61867122':{'en': 'Beedelup'},
'61867123':{'en': '<NAME>'},
'61867124':{'en': 'Bridgetown'},
'61867125':{'en': '<NAME>'},
'61867126':{'en': 'Bunbury'},
'61867127':{'en': 'Busselton'},
'61867128':{'en': 'Capel'},
'61867129':{'en': 'Collie'},
'61867130':{'en': 'Cundinup'},
'61867131':{'en': 'Dardanup'},
'61867132':{'en': 'Darkan'},
'61867133':{'en': 'Dinninup'},
'61867134':{'en': '<NAME>'},
'61867135':{'en': 'Donnybrook'},
'61867136':{'en': 'Harvey'},
'61867137':{'en': 'Jangardup'},
'61867138':{'en': '<NAME>'},
'61867139':{'en': 'Manjimup'},
'61867140':{'en': '<NAME>'},
'61867141':{'en': 'Marybrook'},
'61867142':{'en': 'Myalup'},
'61867143':{'en': 'Nannup'},
'61867144':{'en': 'Nyamup'},
'61867145':{'en': 'Pemberton'},
'61867146':{'en': 'Tonebridge'},
'61867147':{'en': '<NAME>'},
'61867148':{'en': 'Waroona'},
'61867149':{'en': 'Wilga'},
'61867150':{'en': 'Busselton'},
'61867151':{'en': 'Marybrook'},
'61867152':{'en': 'Marybrook'},
'61867153':{'en': 'Marybrook'},
'61867154':{'en': 'Augusta'},
'61867155':{'en': 'Balingup'},
'61867156':{'en': 'Beedelup'},
'61867157':{'en': '<NAME>'},
'61867158':{'en': 'Bridgetown'},
'61867159':{'en': 'Brunswick Junction'},
'61867160':{'en': 'Bunbury'},
'61867161':{'en': 'Busselton'},
'61867162':{'en': 'Capel'},
'61867163':{'en': 'Collie'},
'61867164':{'en': 'Cundinup'},
'61867165':{'en': 'Dardanup'},
'61867166':{'en': 'Darkan'},
'61867167':{'en': 'Dinninup'},
'61867168':{'en': '<NAME>'},
limit")
break
return clips, clipr
# TODO:
# Only reduce longitudinal sail/rudder authority when heeled.
# For control strategy:
# Maximize forwards force while providing at least X turning torque,
# provide range of turning torques to planner, generate approximation
# of max forwards force as function of turning torque, supply leeways.
# Maximum allowable torque is the maximum generatable from the rudder
# with current heel and sail. From there, we then begin to try
# to improve forwards force by following the gradient (we adjust the
# sail and then adjust the rudder, iteratively).
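# A runnable sketch of the strategy described above (an illustrative assumption,
# not the Controller implementation used later in this file): coordinate ascent
# on (deltas, deltar), maximizing forward force subject to a minimum turning
# torque, adjusting the sail and then the rudder iteratively.
def _max_force_sketch(f_lon, tau, deltas, deltar, tau_min, step=0.01, iters=50):
    """f_lon(ds, dr) and tau(ds, dr) are caller-supplied callables."""
    for _ in range(iters):
        for adjust_sail in (True, False):  # sail first, then rudder
            best_f, best_ds, best_dr = f_lon(deltas, deltar), deltas, deltar
            for sign in (-1.0, 1.0):  # probe one finite-difference step each way
                ds = deltas + sign * step if adjust_sail else deltas
                dr = deltar if adjust_sail else deltar + sign * step
                if tau(ds, dr) >= tau_min and f_lon(ds, dr) > best_f:
                    best_f, best_ds, best_dr = f_lon(ds, dr), ds, dr
            deltas, deltar = best_ds, best_dr
    return deltas, deltar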
def SimpleControl(i, t, tw, vw, tc, vc, yaw, omega, goalyaw):
deltas = Norm(np.pi - Norm(tw)) / 2.0
deltar = np.clip(-Norm(goalyaw - yaw), -0.3, 0.3)
return deltas, deltar
def SailForcesAndTorque(physics, thetaw, vw, thetac, vc, deltas):
Fs, gammas, _, _ = physics.SailForces(thetaw, vw, deltas)
Fk, gammak = physics.KeelForces(thetac, vc)
heel, _ = physics.ApproxHeel(Fs, gammas, Fk, gammak, 0.0, 0.0)
taus, _ = physics.SailTorque(Fs, gammas, deltas, heel, 0, 0, 0)
Fslon = Fs * np.cos(gammas)
return Fslon, taus, heel
def PlotSail(physics, thetaw, vw, thetac, vc, fname=None):
maxsail = abs(Norm(np.pi - thetaw))
minsail = maxsail - np.pi / 2.0
minds = max(0.0, minsail) if thetaw > 0.0 else -maxsail
maxds = maxsail if thetaw > 0.0 else min(-minsail, 0.0)
Fss = []
tauss = []
heels = []
deltass = np.arange(minds, maxds, 0.01)
for deltas in deltass:
Fs, taus, heel = SailForcesAndTorque(
physics, thetaw, vw, thetac, vc, deltas)
Fss.append(Fs)
tauss.append(taus)
heels.append(heel)
tack = "running" if abs(thetaw) < 0.5 else \
"broad reach" if abs(thetaw) < 1.4 else \
"beam reach" if abs(thetaw) < 1.8 else \
"close reach" if abs(thetaw) < 2.5 else \
"close hauled" if abs(thetaw) < 2.8 else \
"in irons"
plt.figure()
plt.title("Sail Forces for Various $\delta_s$, thetaw=%f (%s)" % (thetaw, tack))
plt.plot(deltass, Fss, label="Sail forward force ($F_{s,lon}$)")
plt.plot(deltass, tauss, label="Sail yaw torque ($\\tau_s$)")
plt.xlabel("Sail angle, $\delta_s$ (radians), from %s (left) to %s (right)"
% ("fully stalled" if thetaw > 0 else "luffing",
"fully stalled" if thetaw <= 0 else "luffing"))
plt.ylabel("Force (N), Torque (N-m)")
plt.legend(loc='upper left')
ax = plt.twinx()
ax.plot(deltass, heels, 'r', label=r"Heel angle ($\psi$)")
ax.set_ylabel("Heel Angle (radians)")
ax.legend(loc='upper right')
plt.xlim((minds, maxds))
if fname != None:
plt.savefig(fname)
def PlotMaxForceForTorque(control, thetaw, vw, thetac, vc, taue, nsteps):
deltass, deltars, Flons, taues, mini, deltasmax, deltarmax = \
control.GlobalMaxForceTorque(thetaw, vw, thetac, vc, taue, 0.0, nsteps)
plt.figure()
plt.plot(deltass, Flons, label="$F_{lon}$")
plt.plot(deltass, taues, label="$\\tau_e$")
plt.legend(loc='upper left')
ax = plt.twinx()
ax.plot(deltass, deltars, 'r', label=r"$\delta_r$")
ax.legend(loc='upper right')
def PlotTrajectory(
sim, fcontrol, goalyaw, wind, title=None, fname=None, control=None):
control = None  # debug override: discards the passed-in controller, skipping the diagnostic subplots below
if title:
print("Starting ", title)
if control:
control.Clear()
v0 = [0.0, 0.0]
omega0 = 0.0
heel0 = 0.0
dt = 0.01
niter = 3000
t = [dt * n for n in range(niter)]
xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs, thetaws, vws,\
deltasopt, deltaropt = sim.Run(
wind, v0, omega0, heel0, fcontrol, dt, niter)
plt.figure()
if control:
plt.subplot(211)
plt.plot(t, yaws, 'b', label='yaw')
plt.plot(t, [goalyaw] * len(t), 'b--', label='goal yaw')
if control:
plt.plot(t[0:-1], control.yawrefs, 'b*', label='yawref')
plt.ylabel("Yaw (radians)")
l = plt.legend(loc='upper left')
l.set_zorder(0)
twin = plt.twinx()
twin.plot(t, vcs, 'g', label='speed')
twin.plot(t, omegas, 'r', label='omega')
twin.set_ylabel("Speed (m/s)")
l = twin.legend(loc='upper right')
l.set_zorder(0)
plt.xlabel("Time (sec)")
if title != None:
plt.title(title)
if control:
plt.subplot(212, sharex=twin)
plt.plot(t[:-1], [b[2, 0] for b in control.betas], 'b', label='Ar')
plt.plot(t[:-1], [b[3, 0] for b in control.betas], 'g', label='rs')
plt.plot(t[:-1], [b[4, 0] for b in control.betas], 'r', label='taubias')
plt.legend(loc='upper left')
plt.twinx()
plt.plot(t[0:-1], [t / 10. for t in control.torques], 'y', label='torques')
plt.legend(loc='upper right')
if fname != None:
plt.savefig(fname)
def MakeWind(speedmean, speedstd, dirmean, dirstd, n):
"""
Uses auto-regressive process to compute a set
of wind x/y velocities. Returns a 2-item list where each
item is a list of all the x/y velocities respectively.
"""
N = 10
phispeed = matlib.ones((1, N)) / N * 0.99
phidir = matlib.ones((1, N)) / N * 0.99
s0 = speedmean
d0 = dirmean
speeds = [s0]
dirs = [d0]
xs = []
ys = []
espeed = lambda: random.normal(speedmean, speedstd)
edir = lambda: random.normal(dirmean, dirstd)
for ii in range(1, n+1):
Xspeed = matlib.zeros(phispeed.shape).T
Xdir = matlib.zeros(phidir.shape).T
for jj in range(N):
idx = max(ii + jj - N, 0)
Xspeed[jj, 0] = speeds[idx] - speedmean
Xdir[jj, 0] = dirs[idx] - dirmean
speeds.append(float(phispeed * Xspeed + espeed()))
dirs.append(float(phidir * Xdir + edir()))
xs.append(speeds[-1] * np.cos(dirs[-1]))
ys.append(speeds[-1] * np.sin(dirs[-1]))
return [xs, ys]
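# Example (matching the call in __main__ below): ~3 m/s wind from
# dirmean = -pi/2 with small speed/direction noise, returned as [xs, ys]:
#   wind_xy = MakeWind(3.0, 0.1, -np.pi / 2.0, 0.05, 3000)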
if __name__ == "__main__":
sim = Physics()
wind = [0.0, -3.0]
v0 = [0.0, 0.0]
omega0 = 0.0
heel0 = 0.0
deltas = 0.0
deltar = 0.25
dt = 0.01
niter = 5000
t = [dt * n for n in range(niter)]
forces = DebugForces()
control = lambda i, t, tw, vw, tc, vc, yaw, om: (deltas, deltar)
xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs, thetaws, vws, _, _ =\
sim.Run(
wind, v0, omega0, heel0, control, dt=dt, niter=niter)
if 0:
PlotSail(sim, 0.001, 3.0, 0.0, 1.0)
PlotSail(sim, np.pi / 4.0, 3.0, 0.0, 1.0)
PlotSail(sim, np.pi / 2.0, 3.0, 0.0, 1.0, 'sail_forces_beam.eps')
PlotSail(sim, 3 * np.pi / 4.0, 3.0, 0.0, 1.0)
PlotSail(sim, 7 * np.pi / 8.0, 3.0, 0.0, 1.0)
PlotSail(sim, 3.0, 3.0, 0.0, 1.0)
controlsim = Physics()
control = Controller(controlsim)
if 0:
PlotMaxForceForTorque(control, np.pi / 2.0, 3.0, 0.05, 0.4, -2.0, 50)
# control.MaxForceForTorque(-1.51716946346, 4.56503205727,
# -0.0564452767422, 0.648521086573, -1.57079632679, 0.25)
# control.MaxForceForTorque(-1.51638429183, 4.56599217829,
# -0.0695781219434, 0.640581832306, -1.57079632679, 0.25)
# control.MaxForceForTorque(-1.51716946346, 4.56503205727,
# -0.0564452767422, 0.648521086573, -1.57079632679, 0.25)
# control.MaxForceForTorque(-1.51638429183, 4.56599217829,
# -0.0695781219434, 0.640581832306, -1.57079632679, 0.25)
# sys.exit()
#deltasopt = []
#deltaropt = []
#for i in range(len(thetaws)):
# print(i)
# ds = deltasopt[-1] if len(deltasopt) > 0 else deltas
# ds = np.clip(Norm(np.pi - thetaws[i]), -np.pi / 2.0, np.pi / 2.0)
# ds = abs(ds) if thetaws[i] > 0 else -abs(ds)
# dsopt, dropt = control.MaxForceForTorque(
# thetaws[i], vws[i], thetacs[i], vcs[i], ds, deltar)
# print("ds ", dsopt, " dr ", dropt)
# deltasopt.append(dsopt)
# deltaropt.append(dropt)
gyaw = 0.1
control.goalyaw = gyaw
simple_ctrl = lambda i, t, tw, vw, tc, vc, yaw, om: \
SimpleControl(i, t, tw, vw, tc, vc, yaw, om, control.goalyaw)
PlotTrajectory(sim, simple_ctrl, control.goalyaw, wind,
title="Old Controller", fname="old_beam.eps")
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Nominal Conditions",
fname="full_nominal_beam.eps", control=control)
old_wind = wind
wind = MakeWind(3.0, 0.1, -np.pi / 2.0, 0.05, 3000)
controlsim = Physics()
control = Controller(controlsim)
control.goalyaw = gyaw
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Nominal Conditions, Noisy Wind",
fname="full_nominal_beam_noisy_wind.eps", control=control)
wind = old_wind
controlsim = Physics()
control = Controller(controlsim)
control.goalyaw = gyaw
control.Kbeta *= 0.0
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Nominal Conditions, $K_\\beta = 0$",
fname="kb0_nominal_beam.eps", control=control)
controlsim.rs += 0.5
controlsim.hs *= 0.7
controlsim.Blon -= 10
controlsim.keel.A *= 0.8
# controlsim.sail.A *= 0.8
controlsim.rr *= 1.2
controlsim.Blat *= 0.9
controlsim.Bomega *= 5.0
control = Controller(copy.deepcopy(controlsim))
control.goalyaw = gyaw
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Skewed Simulation",
fname="full_skewed_beam.eps", control=control)
control = Controller(copy.deepcopy(controlsim))
control.Kref = 0.99
control.goalyaw = gyaw
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Skewed, Kref=0.99",
fname="kref99_skew_beam.eps", control=control)
control = Controller(copy.deepcopy(controlsim))
control.Kref = 0.9
control.goalyaw = gyaw
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Skewed, Kref=0.9",
fname="kref9_skew_beam.eps", control=control)
control = Controller(copy.deepcopy(controlsim))
control.Kref = 1.0
control.goalyaw = gyaw
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Skewed, Kref=1.0",
fname="kref1_skew_beam.eps", control=control)
control = Controller(copy.deepcopy(controlsim))
control.Kref = 0.0
control.goalyaw = gyaw
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Skewed, Kref=0",
fname="kref0_skew_beam.eps", control=control)
control = Controller(copy.deepcopy(controlsim))
control.goalyaw = gyaw
control.Kbeta *= 0.0
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Skewed Simulation, no correction",
fname="kb0_skewed_beam.eps", control=control)
gyaw = np.pi / 4.0
controlsim = Physics()
control = Controller(copy.deepcopy(controlsim))
control.goalyaw = gyaw
simple_ctrl = lambda i, t, tw, vw, tc, vc, yaw, om: \
SimpleControl(i, t, tw, vw, tc, vc, yaw, om, control.goalyaw)
PlotTrajectory(sim, simple_ctrl, control.goalyaw, wind,
title="Old Controller, upwind",
fname="old_upwind.eps")
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Nominal Conditions, upwind",
fname="full_nominal_upwind.eps")
control = Controller(copy.deepcopy(controlsim))
control.goalyaw = gyaw
control.maxyawrefvel = -1.0
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="No Ramp, upwind",
fname="full_nominal_upwind_noramp.eps")
control = Controller(copy.deepcopy(controlsim))
control.goalyaw = gyaw
control.maxyawrefvel = 0.2
control.maxyawrefacc = -1.0
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Inf accel ramp, upwind",
fname="inf_acc_ramp_upwind.eps")
control = Controller(copy.deepcopy(controlsim))
control.goalyaw = gyaw
control.maxyawrefacc = 0.2
control.Kbeta *= 0.0
PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
wind, title="Nominal Conditions, upwind, $K_\\beta = 0$",
fname="kb0_nominal_upwind.eps")
plt.show()
sys.exit()
plt.figure()
axxy = plt.subplot(111)
axxy.plot(t, xs, 'b', label="x")
axxy.plot(t, ys, 'g', label="y")
  axxy.plot(t, vxs, 'b*', label="vx")  # trailing arguments reconstructed (assumption)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c), 2018-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
#
# Note: Many tests are built using the examples of the XPath standards,
# published by W3C under the W3C Document License.
#
# References:
# http://www.w3.org/TR/1999/REC-xpath-19991116/
# http://www.w3.org/TR/2010/REC-xpath20-20101214/
# http://www.w3.org/TR/2010/REC-xpath-functions-20101214/
# https://www.w3.org/Consortium/Legal/2015/doc-license
# https://www.w3.org/TR/charmod-norm/
#
import unittest
import datetime
import io
import locale
import math
import os
import time
from decimal import Decimal
try:
import lxml.etree as lxml_etree
except ImportError:
lxml_etree = None
from elementpath import *
from elementpath.namespaces import XSI_NAMESPACE
from elementpath.compat import PY3
from elementpath.datatypes import DateTime, Date, Time, Timezone, \
DayTimeDuration, YearMonthDuration, UntypedAtomic, GregorianYear10
try:
from tests import test_xpath1_parser
except ImportError:
# Python2 fallback
import test_xpath1_parser
XML_GENERIC_TEST = test_xpath1_parser.XML_GENERIC_TEST
XML_POEM_TEST = """<poem author="<NAME>">
Kaum hat dies der Hahn gesehen,
Fängt er auch schon an zu krähen:
«Kikeriki! Kikikerikih!!»
Tak, tak, tak! - da kommen sie.
</poem>"""
class XPath2ParserTest(test_xpath1_parser.XPath1ParserTest):
def setUp(self):
self.parser = XPath2Parser(namespaces=self.namespaces, variables=self.variables)
# Make sure the tests are repeatable.
env_vars_to_tweak = 'LC_ALL', 'LANG'
self.current_env_vars = {v: os.environ.get(v) for v in env_vars_to_tweak}
for v in self.current_env_vars:
os.environ[v] = 'en_US.UTF-8'
def tearDown(self):
if hasattr(self, 'current_env_vars'):
for v in self.current_env_vars:
if self.current_env_vars[v] is not None:
os.environ[v] = self.current_env_vars[v]
def test_xpath_tokenizer2(self):
self.check_tokenizer("(: this is a comment :)",
['(:', '', 'this', '', 'is', '', 'a', '', 'comment', '', ':)'])
self.check_tokenizer("last (:", ['last', '', '(:'])
def test_token_tree2(self):
self.check_tree('(1 + 6, 2, 10 - 4)', '(, (, (+ (1) (6)) (2)) (- (10) (4)))')
self.check_tree('/A/B2 union /A/B1', '(union (/ (/ (A)) (B2)) (/ (/ (A)) (B1)))')
def test_token_source2(self):
self.check_source("(5, 6) instance of xs:integer+", '(5, 6) instance of xs:integer+')
self.check_source("$myaddress treat as element(*, USAddress)", "$myaddress treat as element(*, USAddress)")
def test_xpath_comments(self):
self.wrong_syntax("(: this is a comment :)")
self.wrong_syntax("(: this is a (: nested :) comment :)")
self.check_tree('child (: nasty (:nested :) axis comment :) ::B1', '(child (B1))')
self.check_tree('child (: nasty "(: but not nested :)" axis comment :) ::B1', '(child (B1))')
self.check_value("5 (: before operator comment :) < 4", False) # Before infix operator
self.check_value("5 < (: after operator comment :) 4", False) # After infix operator
self.check_value("true (:# nasty function comment :) ()", True)
self.check_tree(' (: initial comment :)/ (:2nd comment:)A/B1(: 3rd comment :)/ \nC1 (: last comment :)\t',
'(/ (/ (/ (A)) (B1)) (C1))')
def test_comma_operator(self):
self.check_value("1, 2", [1, 2])
self.check_value("(1, 2)", [1, 2])
self.check_value("(-9, 28, 10)", [-9, 28, 10])
self.check_value("(1, 2)", [1, 2])
root = self.etree.XML('<A/>')
self.check_selector("(7.0, /A, 'foo')", root, [7.0, root, 'foo'])
self.check_selector("7.0, /A, 'foo'", root, [7.0, root, 'foo'])
self.check_selector("/A, 7.0, 'foo'", self.etree.XML('<dummy/>'), [7.0, 'foo'])
def test_range_expressions(self):
# Some cases from https://www.w3.org/TR/xpath20/#construct_seq
self.check_value("1 to 2", [1, 2])
self.check_value("1 to 10", list(range(1, 11)))
self.check_value("(10, 1 to 4)", [10, 1, 2, 3, 4])
self.check_value("10 to 10", [10])
self.check_value("15 to 10", [])
self.check_value("fn:reverse(10 to 15)", [15, 14, 13, 12, 11, 10])
def test_parenthesized_expressions(self):
self.check_value("(1, 2, '10')", [1, 2, '10'])
self.check_value("()", [])
def test_if_expressions(self):
root = self.etree.XML('<A><B1><C1/><C2/></B1><B2/><B3><C3/><C4/><C5/></B3></A>')
self.check_value("if (1) then 2 else 3", 2)
self.check_selector("if (true()) then /A/B1 else /A/B2", root, root[:1])
self.check_selector("if (false()) then /A/B1 else /A/B2", root, root[1:2])
# Cases from XPath 2.0 examples
root = self.etree.XML('<part discounted="false"><wholesale/><retail/></part>')
self.check_selector(
'if ($part/@discounted) then $part/wholesale else $part/retail',
root, [root[0]], variables={'part': root}
)
root = self.etree.XML('<widgets>'
' <widget><unit-cost>25</unit-cost></widget>'
' <widget><unit-cost>10</unit-cost></widget>'
' <widget><unit-cost>15</unit-cost></widget>'
'</widgets>')
self.check_selector(
'if ($widget1/unit-cost < $widget2/unit-cost) then $widget1 else $widget2',
root, [root[2]], variables={'widget1': root[0], 'widget2': root[2]}
)
def test_quantifier_expressions(self):
# Cases from XPath 2.0 examples
root = self.etree.XML('<parts>'
' <part discounted="true" available="true" />'
' <part discounted="false" available="true" />'
' <part discounted="true" />'
'</parts>')
self.check_selector("every $part in /parts/part satisfies $part/@discounted", root, True)
self.check_selector("every $part in /parts/part satisfies $part/@available", root, False)
root = self.etree.XML('<emps>'
' <employee><salary>1000</salary><bonus>400</bonus></employee>'
' <employee><salary>1200</salary><bonus>300</bonus></employee>'
' <employee><salary>1200</salary><bonus>200</bonus></employee>'
'</emps>')
self.check_selector("some $emp in /emps/employee satisfies "
" ($emp/bonus > 0.25 * $emp/salary)", root, True)
self.check_selector("every $emp in /emps/employee satisfies "
" ($emp/bonus < 0.5 * $emp/salary)", root, True)
context = XPathContext(root=self.etree.XML('<dummy/>'))
self.check_value("some $x in (1, 2, 3), $y in (2, 3, 4) satisfies $x + $y = 4", True, context)
self.check_value("every $x in (1, 2, 3), $y in (2, 3, 4) satisfies $x + $y = 4", False, context)
self.check_value('some $x in (1, 2, "cat") satisfies $x * 2 = 4', True, context)
self.check_value('every $x in (1, 2, "cat") satisfies $x * 2 = 4', False, context)
def test_for_expressions(self):
# Cases from XPath 2.0 examples
context = XPathContext(root=self.etree.XML('<dummy/>'))
self.check_value("for $i in (10, 20), $j in (1, 2) return ($i + $j)", [11, 12, 21, 22], context)
root = self.etree.XML(
"""
<bib>
<book>
<title>TCP/IP Illustrated</title>
<author>Stevens</author>
<publisher>Addison-Wesley</publisher>
</book>
<book>
<title>Advanced Programming in the Unix Environment</title>
<author>Stevens</author>
<publisher>Addison-Wesley</publisher>
</book>
<book>
<title>Data on the Web</title>
<author>Abiteboul</author>
<author>Buneman</author>
<author>Suciu</author>
</book>
</bib>
""")
# Test step-by-step, testing also other basic features.
self.check_selector("book/author[1]", root, [root[0][1], root[1][1], root[2][1]])
self.check_selector("book/author[. = $a]", root, [root[0][1], root[1][1]], variables={'a': 'Stevens'})
self.check_tree("book/author[. = $a][1]", '(/ (book) ([ ([ (author) (= (.) ($ (a)))) (1)))')
self.check_selector("book/author[. = $a][1]", root, [root[0][1], root[1][1]], variables={'a': 'Stevens'})
self.check_selector("book/author[. = 'Stevens'][2]", root, [])
self.check_selector("for $a in fn:distinct-values(book/author) return $a",
root, ['Stevens', 'Abiteboul', 'Buneman', 'Suciu'])
self.check_selector("for $a in fn:distinct-values(book/author) return book/author[. = $a]",
root, [root[0][1], root[1][1]] + root[2][1:4])
self.check_selector("for $a in fn:distinct-values(book/author) return book/author[. = $a][1]",
root, [root[0][1], root[1][1]] + root[2][1:4])
self.check_selector(
"for $a in fn:distinct-values(book/author) return (book/author[. = $a][1], book[author = $a]/title)",
root,
[root[0][1], root[1][1], root[0][0], root[1][0], root[2][1], root[2][0], root[2][2], root[2][0],
root[2][3], root[2][0]]
)
def test_boolean_functions2(self):
root = self.etree.XML('<A><B1/><B2/><B3/></A>')
self.check_selector("boolean(/A)", root, True)
self.check_selector("boolean((-10, 35))", root, TypeError) # Sequence with two numeric values
self.check_selector("boolean((/A, 35))", root, True)
def test_numerical_expressions2(self):
self.check_value("5 idiv 2", 2)
self.check_value("-3.5 idiv -2", 1)
self.check_value("-3.5 idiv 2", -1)
self.wrong_value("-3.5 idiv 0")
self.wrong_value("xs:float('INF') idiv 2")
def test_comparison_operators(self):
super(XPath2ParserTest, self).test_comparison_operators()
self.check_value("0.05 eq 0.05", True)
self.check_value("19.03 ne 19.02999", True)
self.check_value("-1.0 eq 1.0", False)
self.check_value("1 le 2", True)
self.check_value("3 le 2", False)
self.check_value("5 ge 9", False)
self.check_value("5 gt 3", True)
self.check_value("5 lt 20.0", True)
self.check_value("false() eq 1", False)
self.check_value("0 eq false()", True)
self.check_value("2 * 2 eq 4", True)
self.check_value("() le 4")
self.check_value("4 gt ()")
self.check_value("() eq ()") # Equality of empty sequences is also an empty sequence
# From XPath 2.0 examples
root = self.etree.XML('<collection>'
' <book><author>Kafka</author></book>'
' <book><author>Huxley</author></book>'
' <book><author>Asimov</author></book>'
'</collection>')
context = XPathContext(root=root, variables={'book1': root[0]})
self.check_value('$book1 / author = "Kafka"', True, context=context)
self.check_value('$book1 / author eq "Kafka"', True, context=context)
self.check_value("(1, 2) = (2, 3)", True)
self.check_value("(2, 3) = (3, 4)", True)
self.check_value("(1, 2) = (3, 4)", False)
self.check_value("(1, 2) != (2, 3)", True) # != is not the inverse of =
context = XPathContext(root=root, variables={
'a': UntypedAtomic('1'), 'b': UntypedAtomic('2'), 'c': UntypedAtomic('2.0')
})
self.check_value('($a, $b) = ($c, 3.0)', False, context=context)
self.check_value('($a, $b) = ($c, 2.0)', True, context=context)
root = self.etree.XML('<root min="10" max="7"/>')
self.check_value('@min', [AttributeNode('min', '10')], context=XPathContext(root=root))
self.check_value('@min le @max', True, context=XPathContext(root=root))
root = self.etree.XML('<root min="80" max="7"/>')
self.check_value('@min le @max', False, context=XPathContext(root=root))
self.check_value('@min le @maximum', None, context=XPathContext(root=root))
root = self.etree.XML('<root><a>1</a><a>10</a><a>30</a><a>50</a></root>')
self.check_selector("a = (1 to 30)", root, True)
self.check_selector("a = (2)", root, False)
self.check_selector("a[1] = (1 to 10, 30)", root, True)
self.check_selector("a[2] = (1 to 10, 30)", root, True)
self.check_selector("a[3] = (1 to 10, 30)", root, True)
self.check_selector("a[4] = (1 to 10, 30)", root, False)
def test_number_functions2(self):
# Test cases taken from https://www.w3.org/TR/xquery-operators/#numeric-value-functions
self.check_value("abs(10.5)", 10.5)
self.check_value("abs(-10.5)", 10.5)
self.check_value("round-half-to-even(0.5)", 0)
self.check_value("round-half-to-even(1.5)", 2)
self.check_value("round-half-to-even(2.5)", 2)
self.check_value("round-half-to-even(3.567812E+3, 2)", 3567.81E0)
self.check_value("round-half-to-even(4.7564E-3, 2)", 0.0E0)
self.check_value("round-half-to-even(35612.25, -2)", 35600)
def test_sum_function(self):
self.check_value("sum((10, 15, 6, -2))", 29)
def test_avg_function(self):
context = XPathContext(root=self.etree.XML('<A/>'),
variables={
'd1': YearMonthDuration.fromstring("P20Y"),
'd2': YearMonthDuration.fromstring("P10M"),
'seq3': [3, 4, 5]
})
self.check_value("fn:avg($seq3)", 4.0, context=context)
self.check_value("fn:avg(($d1, $d2))", YearMonthDuration.fromstring("P125M"), context=context)
root_token = self.parser.parse("fn:avg(($d1, $seq3))")
self.assertRaises(TypeError, root_token.evaluate, context=context)
self.check_value("fn:avg(())", [])
self.check_value("fn:avg($seq3)", 4.0, context=context)
root_token = self.parser.parse("fn:avg((xs:float('INF'), xs:float('-INF')))")
self.assertTrue(math.isnan(root_token.evaluate(context)))
root_token = self.parser.parse("fn:avg(($seq3, xs:float('NaN')))")
self.assertTrue(math.isnan(root_token.evaluate(context)))
root = self.etree.XML('<a><b>1</b><b>9</b></a>')
self.check_selector('avg(/a/b/number(text()))', root, 5)
def test_max_function(self):
self.check_value("fn:max((3,4,5))", 5)
self.check_value("fn:max((5, 5.0e0))", 5.0e0)
if PY3:
self.wrong_type("fn:max((3,4,'Zero'))")
else:
self.check_value("fn:max((3,4,'Zero'))", 'Zero')
dt = datetime.datetime.now()
self.check_value('fn:max((fn:current-date(), xs:date("2001-01-01")))',
Date(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo))
self.check_value('fn:max(("a", "b", "c"))', 'c')
root = self.etree.XML('<a><b>1</b><b>9</b></a>')
self.check_selector('max(/a/b/number(text()))', root, 9)
def test_min_function(self):
self.check_value("fn:min((3,4,5))", 3)
self.check_value("fn:min((5, 5.0e0))", 5.0e0)
self.check_value("fn:min((xs:float(0.0E0), xs:float(-0.0E0)))", 0.0)
self.check_value('fn:min((fn:current-date(), xs:date("2001-01-01")))', Date.fromstring("2001-01-01"))
self.check_value('fn:min(("a", "b", "c"))', 'a')
root = self.etree.XML('<a><b>1</b><b>9</b></a>')
self.check_selector('min(/a/b/number(text()))', root, 1)
###
# Functions on strings
def test_codepoints_to_string_function(self):
        # Body reconstructed (assumption): the canonical W3C example for this
        # function, from the XPath functions-and-operators spec referenced above.
        self.check_value('fn:codepoints-to-string((2309, 2358, 2378, 2325))', u'अशॊक')
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.genmod.families import links
from tabulate import tabulate
from zepid.calc.utils import (risk_ci, incidence_rate_ci, risk_ratio, risk_difference, number_needed_to_treat,
odds_ratio, incidence_rate_difference, incidence_rate_ratio, sensitivity, specificity)
#########################################################################################################
# Measures of effect / association
#########################################################################################################
class RiskRatio:
r"""Estimate of Risk Ratio with a (1-alpha)*100% Confidence interval from a pandas DataFrame. Missing data is
ignored. Exposure categories should be mutually exclusive
Risk ratio is calculated from
.. math::
RR = \frac{\Pr(Y|A=1)}{\Pr(Y|A=0)}
Risk ratio standard error is
.. math::
SE = \left(\frac{1}{a} - \frac{1}{a + b} + \frac{1}{c} - \frac{1}{c + d}\right)^{\frac{1}{2}}
Note
----
    Outcome must be coded as (1: yes, 0: no). Only supports binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the risk ratio in a data set
>>> from zepid import RiskRatio, load_sample_data
>>> df = load_sample_data(False)
>>> rr = RiskRatio()
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.summary()
Calculate the risk ratio with exposure of '1' as the reference category
>>> rr = RiskRatio(reference=1)
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.summary()
Generate a plot of the calculated risk ratio(s)
>>> import matplotlib.pyplot as plt
>>> rr = RiskRatio()
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.risks = []
self.risk_ratio = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the Risk Ratio given a data set
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
risk_lcl = []
risk_ucl = []
risk_sd = []
rr_lcl = []
rr_ucl = []
rr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = risk_ci(events=self._c, total=(self._c + self._d), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
self.risk_ratio.append(1)
rr_lcl.append(None)
rr_ucl.append(None)
rr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
ri, lr, ur, sd, *_ = risk_ci(events=a, total=(a+b), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
em, lcl, ucl, sd, *_ = risk_ratio(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.risk_ratio.append(em)
rr_lcl.append(lcl)
rr_ucl.append(ucl)
rr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['Risk'] = self.risks
rf['SD(Risk)'] = risk_sd
rf['Risk_LCL'] = risk_lcl
rf['Risk_UCL'] = risk_ucl
rf['RiskRatio'] = self.risk_ratio
rf['SD(RR)'] = rr_sd
rf['RR_LCL'] = rr_lcl
rf['RR_UCL'] = rr_ucl
rf['CLR'] = rf['RR_UCL'] / rf['RR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Risk Ratio ')
print('======================================================================')
print(self.results[['Risk', 'SD(Risk)', 'Risk_LCL', 'Risk_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskRatio', 'SD(RR)', 'RR_LCL', 'RR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, measure='risk_ratio', scale='linear', center=1, **errorbar_kwargs):
"""Plot the risk ratios or the risks along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display risk ratios or risks. Default is to display the risk ratio. Options are;
* 'risk_ratio' : display risk ratios
* 'risk' : display risks
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
        center : int or float, optional
            Sets a reference line. For the risk ratio, the reference line defaults to 1. For risks, no reference
            line is displayed.
        errorbar_kwargs : optional
            Additional keyword arguments passed to the plotting function ``matplotlib.pyplot.errorbar``. See the
            defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'risk_ratio':
ax = _plotter(estimate=self.results['RiskRatio'], lcl=self.results['RR_LCL'], ucl=self.results['RR_UCL'],
labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Risk Ratio')
elif measure == 'risk':
ax = _plotter(estimate=self.results['Risk'], lcl=self.results['Risk_LCL'], ucl=self.results['Risk_UCL'],
labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Risk')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "risk_ratio" or "risk" for plots')
return ax
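# Illustrative plot call (assumes a RiskRatio `rr` fitted as in the class
# docstring); extra keyword arguments such as `ecolor` and `capsize` are
# forwarded to matplotlib's errorbar:
# ax = rr.plot(measure='risk_ratio', scale='log', ecolor='k', capsize=4)
# plt.show()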
class RiskDifference:
r"""Estimate of Risk Difference with a (1-alpha)*100% Confidence interval from a pandas DataFrame. Missing data is
ignored. Exposure categories should be mutually exclusive
Risk difference is calculated as
.. math::
RD = \Pr(Y|A=1) - \Pr(Y|A=0)
Risk difference standard error is calculated as
.. math::
SE = \left(\frac{R_1 \times (1 - R_1)}{a+b} + \frac{R_0 \times (1-R_0)}{c+d}\right)^{\frac{1}{2}}
In addition to confidence intervals, the Frechet bounds are calculated as well. These probability bounds are useful
for a comparison. Within these bounds, the true causal risk difference in the sample must live. The only
    assumptions these bounds require are no measurement error, causal consistency, no selection bias, and that any
    missing data is MCAR. These bounds are always unit width (width of one), but they do not require any assumptions regarding
confounding / conditional exchangeability. They are calculated via the following formula
.. math::
Lower = \Pr(Y|A=a)\Pr(A=a) - \Pr(Y|A \ne a)\Pr(A \ne a) - \Pr(A=a)\\
Upper = \Pr(Y|A=a)\Pr(A=a) + \Pr(A \ne a) - \Pr(Y|A \ne a)\Pr(A \ne a)
For further details on these bounds, see the references
Note
----
Outcome must be coded as (1: yes, 0:no). Only supports binary outcomes
Parameters
------------
    reference : integer, optional
        Reference category for comparisons. Default reference category is 0
    alpha : float, optional
        Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
References
----------
Cole SR et al. (2019) Nonparametric Bounds for the Risk Function. American Journal of Epidemiology. 188(4), 632-636
Examples
--------
Calculate the risk difference in a data set
>>> from zepid import RiskDifference, load_sample_data
>>> df = load_sample_data(False)
>>> rd = RiskDifference()
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.summary()
Calculate the risk difference with exposure of '1' as the reference category
>>> rd = RiskDifference(reference=1)
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.summary()
Generate a plot of the calculated risk difference(s)
>>> import matplotlib.pyplot as plt
>>> rd = RiskDifference()
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.risks = []
self.risk_difference = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self.n = None
def fit(self, df, exposure, outcome):
"""Calculates the Risk Difference
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
n = df.dropna(subset=[exposure, outcome]).shape[0]
# Setting up holders for results
risk_lcl = []
risk_ucl = []
risk_sd = []
rd_lcl = []
rd_ucl = []
rd_sd = []
fr_lower = []
fr_upper = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:' + str(self.reference))
ri, lr, ur, sd, *_ = risk_ci(events=self._c, total=(self._c + self._d), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
import json
import os
import pickle
from bz2 import BZ2File
from collections.abc import Mapping, Sequence
from pathlib import Path
# `hdir` and `InvalidArgumentError`, used below, are assumed to come from
# elsewhere in this package.


def _read_write_args(path, mode):
    """Helper for `save` and `load`: map a file extension to a file opener,
    mode suffix, and serializer module. May support more formats in future.
    (Signature and imports reconstructed, as assumptions, from the call sites
    in `save` and `load` below.)
    """
    ext = path.rpartition('.')[-1]
ext2data = {
'json': (open, '', json),
'pkl': (open, 'b', pickle),
'zip': (BZ2File, '', pickle),
}
opener, mode_suffix, saver = ext2data[ext]
return opener, mode + mode_suffix, saver
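# For example (illustrative): _read_write_args('state.zip', 'w') returns
# (BZ2File, 'w', pickle), i.e. pickle into a bz2-compressed file, while
# _read_write_args('state.pkl', 'w') returns (open, 'wb', pickle).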
def save(obj, path, mode_pre='w', verbose=True):
"""Wrapper to save data as text, pickle (optionally zipped), or json.
Parameters
-----------
obj: any
Object to save. This will be pickled/jsonified/zipped inside the
function - do not convert it before-hand.
path: str
        File name to save object to. Should end with .txt, .sh, .md, .py,
        .pkl, .zip, or .json depending on the desired output format. If .zip
        is used, the object will be pickled inside a bz2-compressed file.
        (.sh, .md, and .py are treated identically to .txt.)
mode_pre: str
Determines whether to write or append text. One of ('w', 'a').
verbose: bool
If True, print a message confirming that the data was pickled, along
with its path.
Returns
-------
None
"""
path = Path(path)
os.makedirs(path.parent, exist_ok=True)
if verbose: print(f'Writing data to {path}.')
if path.suffix[1:] in ('txt', 'sh', 'md', 'py'):
with path.open(mode_pre) as f:
f.write(obj)
else:
opener, mode, saver = _read_write_args(str(path), mode_pre)
with opener(path, mode) as f:
saver.dump(obj, f)
def load(path, verbose=True):
"""Wrapper to load text files or pickled (optionally zipped) or json data.
Parameters
----------
path : str
        File to load. File type will be inferred from extension. Must be one of
        '.txt', '.sh', '.md', '.py', '.json', '.pkl', or '.zip'.
verbose : bool, optional
If True, will print message stating where object was loaded from.
Returns
-------
    object: The Python object stored in the specified file (raw text, parsed
        json, or unpickled data, depending on the extension).
"""
path = Path(path)
if path.suffix[1:] in ('txt', 'sh', 'md', 'py'):
return path.read_text()
opener, mode, saver = _read_write_args(str(path), 'r')
with opener(path, mode) as f:
data = saver.load(f)
if verbose: print(f'Object loaded from {path}.')
return data
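# Illustrative round trip (hypothetical paths; the format is inferred from
# the extension as described above):
# save({'a': 1}, 'data/config.json')   # written as json
# save([1, 2, 3], 'data/nums.zip')     # pickled into a bz2-compressed file
# cfg = load('data/config.json')       # -> {'a': 1}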
def dict_sum(*args):
"""Given two or more dictionaries with numeric values, combine them into a
single dictionary. For keys that appear in multiple dictionaries, their
corresponding values are added to produce the new value.
This differs from combining two dictionaries in the following manner:
{**d1, **d2}
The method shown above will combine the keys but will retain the value
from d2, rather than adding the values from d1 and d2.
Parameters
-----------
*args: dicts
2 or more dictionaries with numeric values.
Returns
--------
dict: Contains all keys which appear in any of the dictionaries that are
passed in. The corresponding values from each dictionary containing a
given key are summed to produce the new value.
Examples
---------
>>> d1 = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = {'a': 10, 'c': -20, 'd': 30}
>>> d3 = {'c': 10, 'd': 5, 'e': 0}
    >>> dict_sum(d1, d2, d3)
{'a': 11, 'b': 2, 'c': -7, 'd': 35, 'e': 0}
"""
keys = {key for d in args for key in d.keys()}
return {key: sum(d.get(key, 0) for d in args)
for key in keys}
def _select_mapping(items, keep=(), drop=()):
"""Helper function for `select`.
Parameters
----------
items: Mapping
Dict (or similar mapping) to select/drop from.
keep: Iterable[str]
Sequence of keys to keep.
drop: Iterable[str]
Sequence of keys to drop. You should specify either `keep` or `drop`,
not both.
Returns
-------
Dict
"""
if keep:
return {k: items[k] for k in keep}
return {k: v for k, v in items.items() if k not in set(drop)}
def _select_sequence(items, keep=(), drop=()):
"""Helper function for `select` that works on sequences (basically
collections that support enumeration).
Parameters
----------
items: Sequence
List, tuple, or iterable sequence of some sort to select items from.
    keep: Iterable[int]
        Sequence of indices to keep.
    drop: Iterable[int]
        Sequence of indices to drop. You should specify either `keep` or
        `drop`, not both.
Returns
-------
Same type as `items` (usually a list or tuple).
"""
type_ = type(items)
if keep:
return type_(x for i, x in enumerate(items) if i in set(keep))
return type_(x for i, x in enumerate(items) if i not in set(drop))
def select(items, keep=(), drop=()):
"""Select a subset of a data structure. When used on a mapping (e.g. dict),
you can specify a list of keys to include or exclude. When used on a
sequence like a list or tuple, specify indices instead of keys.
Parameters
----------
    items: abc.Sequence or abc.Mapping
        The mapping or sequence to select items from.
    keep: Iterable
        Sequence of keys (or indices, for sequences) to keep.
    drop: Iterable
        Sequence of keys (or indices) to drop. You should specify either
        `keep` or `drop`, not both.
    Returns
    -------
    Same type as `items`: contains only the specified keys/indices (when
        passing in `keep`), or all except the specified ones (when passing
        in `drop`).
"""
if bool(keep) + bool(drop) != 1:
raise InvalidArgumentError('Specify exactly one of `keep` or `drop`.')
if isinstance(items, Mapping):
return _select_mapping(items, keep, drop)
elif isinstance(items, Sequence):
return _select_sequence(items, keep, drop)
else:
raise InvalidArgumentError('`items` must be a Mapping or Sequence.')
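# Example usage (illustrative):
# select({'a': 1, 'b': 2, 'c': 3}, keep=['a', 'c'])  # -> {'a': 1, 'c': 3}
# select(['w', 'x', 'y', 'z'], drop=[1, 2])          # -> ['w', 'z']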
def differences(obj1, obj2, methods=False, **kwargs):
"""Find the differences between two objects (generally of the same type -
technically this isn't enforced but we do require that the objects have
the same set of attribute names so a similar effect is achieved. Actual
type checking was causing problems comparing multiple Args instances,
presumably because each Args object is defined when called).
This is a way to get more detail beyond whether two objects are equal or
not.
Parameters
-----------
obj1: any
An object.
obj2: any, usually the same type as obj1
An object.
methods: bool
If True, include methods in the comparison. If False, only attributes
will be compared. Note that the output may not be particularly
    interpretable when using methods=True; for instance, when comparing two
strings consisting of different characters, we get a lot of output
that looks like this:
{'islower': (<function str.islower()>, <function str.islower()>),
'isupper': (<function str.isupper()>, <function str.isupper()>),...
'istitle': (<function str.istitle()>, <function str.istitle()>)}
These attributes all reflect the same difference: if obj1 is 'abc'
and obj2 is 'def', then
'abc' != 'def' and
    'ABC' != 'DEF' and
'Abc' != 'Def'.
    When methods=False, we ignore all of these, such that
differences('a', 'b') returns {}. Therefore, it is important to
carefully consider what differences you care about identifying.
**kwargs: bool
Can pass args to hdir to include magics or internals.
Returns
--------
dict[str, tuple]: Maps attribute name to a tuple of values, where the
first is the corresponding value for obj1 and the second is the
corresponding value for obj2.
"""
    # Try built-in comparison functionality first. Keep error handling broad.
try:
if obj1 == obj2:
return {}
except Exception:
pass
attr1, attr2 = hdir(obj1, **kwargs), hdir(obj2, **kwargs)
assert attr1.keys() == attr2.keys(), 'Objects must have same attributes.'
diffs = {}
for (k1, v1), (k2, v2) in zip(attr1.items(), attr2.items()):
# Only compare non-callable attributes.
if not (methods or v1 == 'attribute'):
continue
# Comparisons work differently for arrays/tensors than other objects.
val1, val2 = getattr(obj1, k1), getattr(obj2, k2)
try:
equal = (val1 == val2).all()
except AttributeError:
equal = val1 == val2
# Store values that are different for obj1 and obj2.
if not equal:
diffs[k1] = (val1, val2)
return diffs
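# Example (illustrative, assuming a simple class whose instances expose a
# single attribute `x` through hdir):
# class Point:
#     def __init__(self, x):
#         self.x = x
# differences(Point(1), Point(2))  # -> {'x': (1, 2)}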
def catch(func, *args, verbose=False):
"""Error handling for list comprehensions. In practice, it's recommended
to use the higher-level robust_comp() function which uses catch() under the
hood.
Parameters
-----------
func: function
*args: any type
Arguments to be passed to func.
verbose: bool
If True, print the error message should one occur.
Returns
--------
any type: If the function executes successfully, its output is returned.
Otherwise, return None.
Examples
---------
    >>> [catch(lambda x: 1 / x, i) for i in range(3)]
    [None, 1.0, 0.5]

    Note that the filtering method shown below also removes zeros, which is
    okay in this case.

    >>> list(filter(None, [catch(lambda x: 1 / x, i) for i in range(3)]))
    [1.0, 0.5]
"""
try:
return func(*args)
except Exception as e:
if verbose: print(e)
return
def safe_map(func, seq):
"""This addresses the issue of error handling in map() or list
comprehension operations by simply skipping any items that throw an error.
Note that values of None will be removed from the resulting list.
    """
    # Body reconstructed (assumption): skip items that raise an error, then
    # drop the Nones that `catch` returns for failures.
    return list(filter(lambda x: x is not None, (catch(func, obj) for obj in seq)))
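# Illustrative call (assuming the reconstructed body above): division by zero
# is caught and its None result filtered out.
# safe_map(lambda x: 1 / x, [0, 1, 2])  # -> [1.0, 0.5]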
persons or things",
"80060": "liability, governmental: tort or contract actions by or against government or governmental officials other than defense of criminal actions brought under a civil rights action.",
"80070": "liability, other than as in sufficiency of evidence, election of remedies, punitive damages",
"80080": "liability, punitive damages",
"80090": "Employee Retirement Income Security Act (cf. union trust funds)",
"80100": "state or local government tax",
"80105": "state and territorial land claims",
"80110": "state or local government regulation, especially of business (cf. federal pre-emption of state court jurisdiction, federal pre-emption of state legislation or regulation)",
"80120": "federal or state regulation of securities",
"80130": "natural resources - environmental protection (cf. national supremacy: natural resources, national supremacy: pollution)",
"80140": "corruption, governmental or governmental regulation of other than as in campaign spending",
"80150": "zoning: constitutionality of such ordinances, or restrictions on owners' or lessors' use of real property",
"80160": "arbitration (other than as pertains to labor-management or employer-employee relations (cf. union arbitration)",
"80170": "federal or state consumer protection: typically under the Truth in Lending; Food, Drug and Cosmetic; and Consumer Protection Credit Acts",
"80180": "patents and copyrights: patent",
"80190": "patents and copyrights: copyright",
"80200": "patents and copyrights: trademark",
"80210": "patents and copyrights: patentability of computer processes",
"80220": "federal or state regulation of transportation regulation: railroad",
"80230": "federal and some few state regulations of transportation regulation: boat",
"80240": "federal and some few state regulation of transportation regulation:truck, or motor carrier",
"80250": "federal and some few state regulation of transportation regulation: pipeline (cf. federal public utilities regulation: gas pipeline)",
"80260": "federal and some few state regulation of transportation regulation: airline",
"80270": "federal and some few state regulation of public utilities regulation: electric power",
"80280": "federal and some few state regulation of public utilities regulation: nuclear power",
"80290": "federal and some few state regulation of public utilities regulation: oil producer",
"80300": "federal and some few state regulation of public utilities regulation: gas producer",
"80310": "federal and some few state regulation of public utilities regulation: gas pipeline (cf. federal transportation regulation: pipeline)",
"80320": "federal and some few state regulation of public utilities regulation: radio and television (cf. cable television)",
"80330": "federal and some few state regulation of public utilities regulation: cable television (cf. radio and television)",
"80340": "federal and some few state regulations of public utilities regulation: telephone or telegraph company",
"80350": "miscellaneous economic regulation",
"90010": "comity: civil rights",
"90020": "comity: criminal procedure",
"90030": "comity: First Amendment",
"90040": "comity: habeas corpus",
"90050": "comity: military",
"90060": "comity: obscenity",
"90070": "comity: privacy",
"90080": "comity: miscellaneous",
"90090": "comity primarily removal cases, civil procedure (cf. comity, criminal and First Amendment); deference to foreign judicial tribunals",
"90100": "assessment of costs or damages: as part of a court order",
"90110": "Federal Rules of Civil Procedure including Supreme Court Rules, application of the Federal Rules of Evidence, Federal Rules of Appellate Procedure in civil litigation, Circuit Court Rules, and state rules and admiralty rules",
"90120": "judicial review of administrative agency's or administrative official's actions and procedures",
"90130": "mootness (cf. standing to sue: live dispute)",
"90140": "venue",
"90150": "no merits: writ improvidently granted",
"90160": "no merits: dismissed or affirmed for want of a substantial or properly presented federal question, or a nonsuit",
"90170": "no merits: dismissed or affirmed for want of jurisdiction (cf. judicial administration: Supreme Court jurisdiction or authority on appeal from federal district courts or courts of appeals)",
"90180": "no merits: adequate non-federal grounds for decision",
"90190": "no merits: remand to determine basis of state or federal court decision (cf. judicial administration: state law)",
"90200": "no merits: miscellaneous",
"90210": "standing to sue: adversary parties",
"90220": "standing to sue: direct injury",
"90230": "standing to sue: legal injury",
"90240": "standing to sue: personal injury",
"90250": "standing to sue: justiciable question",
"90260": "standing to sue: live dispute",
"90270": "standing to sue: parens patriae standing",
"90280": "standing to sue: statutory standing",
"90290": "standing to sue: private or implied cause of action",
"90300": "standing to sue: taxpayer's suit",
"90310": "standing to sue: miscellaneous",
"90320": "judicial administration: jurisdiction or authority of federal district courts or territorial courts",
"90330": "judicial administration: jurisdiction or authority of federal courts of appeals",
"90340": "judicial administration: Supreme Court jurisdiction or authority on appeal or writ of error, from federal district courts or courts of appeals (cf. 753)",
"90350": "judicial administration: Supreme Court jurisdiction or authority on appeal or writ of error, from highest state court",
"90360": "judicial administration: jurisdiction or authority of the Court of Claims",
"90370": "judicial administration: Supreme Court's original jurisdiction",
"90380": "judicial administration: review of non-final order",
"90390": "judicial administration: change in state law (cf. no merits: remand to determine basis of state court decision)",
"90400": "judicial administration: federal question (cf. no merits: dismissed for want of a substantial or properly presented federal question)",
"90410": "judicial administration: ancillary or pendent jurisdiction",
"90420": "judicial administration: extraordinary relief (e.g., mandamus, injunction)",
"90430": "judicial administration: certification (cf. objection to reason for denial of certiorari or appeal)",
"90440": "judicial administration: resolution of circuit conflict, or conflict between or among other courts",
"90450": "judicial administration: objection to reason for denial of certiorari or appeal",
"90460": "judicial administration: collateral estoppel or res judicata",
"90470": "judicial administration: interpleader",
"90480": "judicial administration: untimely filing",
"90490": "judicial administration: Act of State doctrine",
"90500": "judicial administration: miscellaneous",
"90510": "Supreme Court's certiorari, writ of error, or appeals jurisdiction",
"90520": "miscellaneous judicial power, especially diversity jurisdiction",
}
def __init__(
self,
data_dir: Union[str, pathlib.Path] = constants.DEFAULT_DATA_DIR.joinpath(NAME),
):
super().__init__(NAME, meta=META)
self.data_dir = utils.to_path(data_dir).resolve()
self._filename = "supreme-court-py3.json.gz"
self._filepath = self.data_dir.joinpath(self._filename)
@property
def filepath(self) -> Optional[str]:
"""
Full path on disk for SupremeCourt data as compressed json file.
``None`` if file is not found, e.g. has not yet been downloaded.
"""
if self._filepath.is_file():
return str(self._filepath)
else:
return None
def download(self, *, force: bool = False) -> None:
"""
Download the data as a Python version-specific compressed json file and
save it to disk under the ``data_dir`` directory.
Args:
force: If True, download the dataset, even if it already exists
on disk under ``data_dir``.
"""
data_version = 1.0
release_tag = f"supreme_court_py3_v{data_version}"
url = urllib.parse.urljoin(DOWNLOAD_ROOT, release_tag + "/" + self._filename)
tio.download_file(
url, filename=self._filename, dirpath=self.data_dir, force=force,
)
def __iter__(self):
if not self._filepath.is_file():
raise OSError(
f"dataset file {self._filepath} not found;\n"
"has the dataset been downloaded yet?"
)
for record in tio.read_json(self._filepath, mode="rt", lines=True):
yield record
def _get_filters(
self, opinion_author, decision_direction, issue_area, date_range, min_len,
):
filters = []
if min_len is not None:
if min_len < 1:
raise ValueError("`min_len` must be at least 1")
filters.append(lambda record: len(record.get("text", "")) >= min_len)
if date_range is not None:
date_range = utils.validate_and_clip_range(
date_range, self.full_date_range, val_type=(str, bytes)
)
filters.append(
lambda record: (
record.get("decision_date")
and date_range[0] <= record["decision_date"] < date_range[1]
)
)
if opinion_author is not None:
opinion_author = utils.validate_set_members(
opinion_author, int, valid_vals=self.opinion_author_codes
)
filters.append(
lambda record: record.get("maj_opinion_author") in opinion_author
)
if decision_direction is not None:
decision_direction = utils.validate_set_members(
decision_direction, (str, bytes), valid_vals=self.decision_directions
)
filters.append(
lambda record: record.get("decision_direction") in decision_direction
)
if issue_area is not None:
issue_area = utils.validate_set_members(
issue_area, int, valid_vals=self.issue_area_codes
)
filters.append(lambda record: record.get("issue_area") in issue_area)
return filters
def _filtered_iter(self, filters):
if filters:
for record in self:
if all(filter_(record) for filter_ in filters):
yield record
else:
for record in self:
yield record
def texts(
self,
*,
opinion_author: Optional[Union[int, Set[int]]] = None,
decision_direction: Optional[Union[str, Set[str]]] = None,
issue_area: Optional[Union[int, Set[int]]] = None,
date_range: Optional[Tuple[Optional[str], Optional[str]]] = None,
min_len: Optional[int] = None,
limit: Optional[int] = None,
) -> Iterable[str]:
"""
Iterate over decisions in this dataset, optionally filtering by a variety
of metadata and/or text length, and yield texts only,
in chronological order by decision date.
Args:
opinion_author: Filter decisions by the name(s) of the majority opinion's author,
coded as an integer whose mapping is given in
:attr:`SupremeCourt.opinion_author_codes`.
decision_direction: Filter decisions by the ideological direction
of the majority's decision; see
:attr:`SupremeCourt.decision_directions`.
            issue_area: Filter decisions by the issue area of the case's
                subject matter, coded as an integer whose mapping is given in
                :attr:`SupremeCourt.issue_area_codes`.
            date_range: Filter decisions by decision date as a (start, end) range.
            min_len: Filter decisions by the minimum length of their text.
            limit: Yield no more than ``limit`` texts that match all filters.
        Yields:
            Text of the next decision in dataset passing all filters.
        """
        # Remainder reconstructed (assumption): build the filters with the
        # helper above and yield only each matching record's "text" field.
        filters = self._get_filters(
            opinion_author, decision_direction, issue_area, date_range, min_len
        )
        for i, record in enumerate(self._filtered_iter(filters)):
            if limit is not None and i >= limit:
                break
            yield record.get("text", "")
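# Illustrative usage (assumes the dataset file has been downloaded; the
# filter values shown are examples, not an exhaustive list):
# ds = SupremeCourt()
# ds.download()
# for text in ds.texts(decision_direction={"liberal"}, min_len=1000, limit=3):
#     print(text[:80])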
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class bitclude(Exchange):
def describe(self):
return self.deep_extend(super(bitclude, self).describe(), {
'id': 'bitclude',
'name': 'Bitclude',
'countries': ['PL'],
'rateLimit': 2000,
'certified': False,
'pro': False,
'urls': {
'api': {
'public': 'https://api.bitclude.com/',
'private': 'https://api.bitclude.com/',
},
'www': 'https://bitclude.com',
'doc': 'https://docs.bitclude.com',
},
'requiredCredentials': {
'apiKey': True,
'secret': False,
'uid': True,
},
'has': {
'fetchMarkets': 'emulated',
'fetchCurrencies': True, # private
'cancelAllOrders': False,
'fetchClosedOrders': False,
'createDepositAddress': True,
'fetchDepositAddress': 'emulated',
'fetchDeposits': True,
'fetchFundingFees': 'emulated',
'fetchMyTrades': True,
'fetchOHLCV': False,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchTickers': True,
'fetchTicker': 'emulated',
'fetchTrades': True,
'fetchTradingFees': False,
'fetchWithdrawals': False,
'withdraw': False,
},
'api': {
'public': {
'get': [
'stats/ticker.json',
'stats/orderbook_{base}{quote}.json',
'stats/history_{base}{quote}.json',
],
},
'private': {
'get': [
'',
],
},
},
'exceptions': {
                # copied from another exchange implementation; TODO: rewrite for bitclude
'exact': {
'Not enough balances': InsufficientFunds, # {"error":"Not enough balances","success":false}
'InvalidPrice': InvalidOrder, # {"error":"Invalid price","success":false}
'Size too small': InvalidOrder, # {"error":"Size too small","success":false}
'Missing parameter price': InvalidOrder, # {"error":"Missing parameter price","success":false}
'Order not found': OrderNotFound, # {"error":"Order not found","success":false}
},
'broad': {
'Invalid parameter': BadRequest, # {"error":"Invalid parameter start_time","success":false}
'The requested URL was not found on the server': BadRequest,
'No such coin': BadRequest,
'No such market': BadRequest,
'An unexpected error occurred': ExchangeError, # {"error":"An unexpected error occurred, please try again later(58BC21C795).","success":false}
},
},
'precisionMode': DECIMAL_PLACES,
})
def fetch_markets(self, params={}):
response = self.publicGetStatsTickerJson(params)
result = []
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote)
precision = {
'price': None,
'amount': None,
}
info = {}
info[id] = self.safe_value(response, id)
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': None,
'info': info,
}
result.append(entry)
return result
def fetch_currencies(self, params={}):
if not self.apiKey or not self.uid:
raise AuthenticationError(self.id + " fetchCurrencies is an authenticated endpoint, therefore it requires 'apiKey' and 'uid' credentials. If you don't need currency details, set exchange.has['fetchCurrencies'] = False before calling its methods.")
request = {
'method': 'account',
'action': 'getwalletsstatus',
}
response = self.privateGet(self.extend(request, params))
ids = list(response.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
if id == 'success':
continue
currency = response[id]
code = self.safe_currency_code(id)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': None,
'active': self.safe_value(currency, 'is_online'),
'fee': self.safe_float(currency, 'current_optimal_fee'),
'precision': self.safe_integer(currency, 'decimal_point'),
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_float(currency, 'current_minimal_amount'),
'max': None,
},
},
}
return result
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
symbols = self.symbols if (symbols is None) else symbols
tickers = self.publicGetStatsTickerJson(params)
marketIds = list(self.marketsById.keys())
result = {}
for i in range(0, len(marketIds)):
marketId = marketIds[i]
market = self.marketsById[marketId]
symbol = market['symbol']
ticker = self.safe_value(tickers, marketId)
if self.in_array(symbol, symbols):
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
ticker = self.fetch_tickers([symbol])
return self.safe_value(ticker, symbol)
def parse_ticker(self, ticker, market):
timestamp = self.milliseconds()
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max24H'),
'low': self.safe_float(ticker, 'min24H'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': self.safe_float(ticker, 'last'),
'last': self.safe_float(ticker, 'last'),
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': ticker,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
baseId, quoteId = market['id'].split('_')
request = {
'base': baseId,
'quote': quoteId,
}
response = self.publicGetStatsOrderbookBaseQuoteJson(self.extend(request, params))
data = self.safe_value(response, 'data')
timestamp = self.safe_timestamp(data, 'timestamp')
parsedOrderBook = self.parse_order_book(response, timestamp, 'bids', 'asks', 1, 0)
if limit is not None:
parsedOrderBook['bids'] = self.filter_by_since_limit(parsedOrderBook['bids'], None, limit)
parsedOrderBook['asks'] = self.filter_by_since_limit(parsedOrderBook['asks'], None, limit)
return parsedOrderBook
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'base': market['baseId'],
'quote': market['quoteId'],
}
response = self.publicGetStatsHistoryBaseQuoteJson(self.extend(request, params))
trades = self.safe_value(response, 'history')
return self.parse_trades(trades, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'method': 'account',
'action': 'history',
}
response = self.privateGet(self.extend(request, params))
trades = self.safe_value(response, 'history', [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
# fetchTrades
#
# {
# "time":1531917229,
# "nr":"786",
# "amount":"0.00018620",
# "price":"7314.57",
# "type":"a"
# }
#
# fetchMyTrades
#
# {
# "currency1": "btc",
# "currency2": "usd",
# "amount": "0.00100000",
# "time_close": 1516212758,
# "price": "4.00",
# "fee_taker": "50", # Idk what does it exactly means
# "fee_maker": "0",
# "type": "bid",
# "action": "open"
# }
id = self.safe_string(trade, 'nr')
timestamp = self.safe_integer_2(trade, 'time', 'time_close')
if 'time' in trade:
            # The API returns timestamps in different formats depending on the endpoint; this isn't specified in the docs.
timestamp = timestamp * 1000
type = None
baseId = self.safe_string(trade, 'currency1')
quoteId = self.safe_string(trade, 'currency2')
symbol = None
quote = None
if baseId is not None and quoteId is not None:
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote)
else:
symbol = market['symbol']
quote = market['quote']
side = self.safe_string(trade, 'type')
if side == 'a' or side == 'ask':
side = 'sell'
elif side == 'b' or side == 'bid':
side = 'buy'
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
if self.currency(quote)['precision'] is not None:
cost = self.currency_to_precision(quote, cost)
fee = None # todo
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'order': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_balance(self, params={}):
self.load_markets()
request = {
'method': 'account',
'action': 'info',
}
response = self.privateGet(self.extend(request, params))
result = {
'info': response,
}
balances = self.safe_value(response, 'balances', [])
currencies = list(balances.keys())
for i in range(0, len(currencies)):
balance = self.safe_value(balances, currencies[i])
currencyCode = self.safe_currency_code(currencies[i])
account = self.account()
account['free'] = self.safe_float(balance, 'active')
account['used'] = self.safe_float(balance, 'inactive')
result[currencyCode] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderId = None
response = None
feeCost = None
feeCurrency = None
if type == 'limit':
request = {
'method': 'transactions',
'action': side,
'market1': market['baseId'],
'market2': market['quoteId'],
'amount': self.currency_to_precision(market['base'], amount),
'rate': self.currency_to_precision(market['quote'], price),
}
response = self.privateGet(self.extend(request, params))
order = self.safe_value(response, 'actions')
orderId = self.safe_string(order, 'order')
elif type == 'market':
request = {
'method': 'account',
'action': 'convert',
}
request['market1'] = market['baseId'] if (side == 'sell') else market['quoteId']
request['market2'] = market['quoteId'] if (side == 'sell') else market['baseId']
currencyOfAmount = market['base'] if (side == 'sell') else market['quote']
request['amount'] = self.currency_to_precision(currencyOfAmount, amount)
response = self.privateGet(self.extend(request, params))
feeCurrency = market['quote'] if (side == 'sell') else market['base']
feeCost = self.safe_string(response, 'fee')
timestamp = self.milliseconds()
return {
'id': orderId,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': 'open',
'symbol': market['symbol'],
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'fee': {
'currency': feeCurrency,
'cost': feeCost,
'rate': None,
},
'trades': None,
'info': response,
}
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'method': 'account',
'action': 'activeoffers',
}
response = self.privateGet(self.extend(request, params))
result = self.safe_value(response, 'offers', [])
orders = self.parse_orders(result, None, since, limit)
if symbol is not None:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
def parse_order(self, order, market=None):
        # Due to the very diverse structure of orders, this method only works
        # for those returned by fetchOpenOrders.
status = 'open'
side = self.safe_string(order, 'offertype')
if side == 'ask':
side = 'sell'
elif side == 'bid':
side = 'buy'
symbol = None
if market is None:
baseId = self.safe_string(order, 'currency1')
quoteId = self.safe_string(order, 'currency2')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote)
else:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'time_open')
        # Note: the 'id', 'price' and 'amount' payload keys below are assumed,
        # as is the 'limit' type (activeoffers lists resting limit orders);
        # adjust them to the actual fields of the activeoffers response.
        return {
            'id': self.safe_string(order, 'id'),
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': 'limit',
            'side': side,
            'price': self.safe_float(order, 'price'),
            'amount': self.safe_float(order, 'amount'),
            'filled': None,
            'remaining': None,
            'cost': None,
            'average': None,
            'fee': None,
            'trades': None,
            'info': order,
        }
"""content module.
This module describes the incoming content.
"""
from functools import wraps
from .url import get_url
def _replace_args(*args):
"""Class decorator. Changes built-in names in kwargs.
:param args: arg names (id, from, type).
"""
def wrapper(cls):
origin_init = cls.__init__
@wraps(cls.__init__)
def __init__(self, **kwargs):
            # Only rename the args that were actually supplied (e.g. channel
            # posts carry no 'from' field).
            replace_dict = {
                arg + '_': kwargs.pop(arg) for arg in args if arg in kwargs
            }
kwargs.update(replace_dict)
origin_init(self, **kwargs)
cls.__init__ = __init__
return cls
return wrapper
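# Illustrative sketch (not part of the original module): _replace_args lets a
# class accept reserved or built-in names like `id` as keyword arguments by
# renaming them to `id_` before the wrapped __init__ runs.
def _demo_replace_args():
    @_replace_args('id')
    class _Thing(object):
        def __init__(self, id_):
            self.id = id_
    return _Thing(id=42).id  # -> 42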
class _Content(object):
"""Base class."""
@classmethod
def from_list(cls, dict_list):
return [cls(**obj_dict) for obj_dict in dict_list]
class DictItem(object):
"""Base class."""
def to_dict(self):
args_dict = {key: val for key, val in self.__dict__.items() if val}
return args_dict
class _Query(_Content):
"""Base class for query types."""
def get_str_type(self):
"""Makes a string from the class name by separating the case."""
str_type = ''
for char in self.__class__.__name__:
if char.isupper():
str_type += '_{}'.format(char.lower())
else:
str_type += char
return str_type[1:]
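# Illustrative sketch: get_str_type() turns a CamelCase query class name into
# snake_case, so a subclass named CallbackQuery reports 'callback_query'.
def _demo_query_str_type():
    class CallbackQuery(_Query):
        pass
    return CallbackQuery().get_str_type()  # -> 'callback_query'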
class _MediaContent(_Content):
"""Base class for media content."""
def __init__(self, file_id, file_size=None, **kwargs):
self.id = file_id
self.size = file_size
for arg in kwargs:
setattr(self, arg, kwargs[arg])
def __str__(self):
        return '{}(id:{})'.format(self.__class__.__name__, self.id)
class Update(_Content):
"""This class represents an incoming update. """
def __init__(self, update_id, message=None, edited_message=None,
channel_post=None, edited_channel_post=None,
inline_query=None, chosen_inline_result=None,
callback_query=None, shipping_query=None,
pre_checkout_query=None):
"""Initial instance.
        :param update_id: The update’s unique identifier. Update identifiers
start from a certain positive number and increase sequentially.
This ID becomes especially handy if you’re using Webhooks, since it
allows you to ignore repeated updates or to restore the correct
update sequence, should they get out of order.
:param message: New incoming message of any kind — text, photo,
sticker, etc.
:param edited_message: New version of a message that is known to the
bot and was edited.
:param channel_post: New incoming channel post of any kind — text,
photo, sticker, etc.
:param edited_channel_post: New version of a channel post that is known
to the bot and was edited.
:param inline_query: New incoming inline query.
:param chosen_inline_result: The result of an inline query that was
chosen by a user and sent to their chat partner.
:param callback_query: New incoming callback query.
:param shipping_query: New incoming shipping query. Only for invoices
with flexible price.
:param pre_checkout_query: New incoming pre-checkout query. Contains
full information about checkout.
"""
self.id = update_id
message_dict = (
message or edited_message or channel_post or edited_channel_post
)
if message_dict:
self.content = Message(**message_dict)
if inline_query:
self.content = InlineQuery(**inline_query)
if chosen_inline_result:
self.content = ChosenInlineResult(**chosen_inline_result)
if callback_query:
self.content = CallbackQuery(**callback_query)
if shipping_query:
self.content = ShippingQuery(**shipping_query)
if pre_checkout_query:
self.content = PreCheckoutQuery(**pre_checkout_query)
def __str__(self):
return '{}(id:{id}, content:{content})'.format(self.__class__.__name__,
**self.__dict__)
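# Illustrative sketch: a raw update dict (as delivered by the Bot API) becomes
# an Update whose .content is the matching wrapper class.
def _demo_update():
    update = Update(update_id=1, message={
        'message_id': 10,
        'date': 1516212758,
        'chat': {'id': 5, 'type': 'private'},
        'from': {'id': 7, 'first_name': 'Ann'},
        'text': 'hello',
    })
    return update.content  # -> a Message instance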
@_replace_args('id')
class User(_Content):
"""This class represents a Telegram user or bot."""
def __init__(self, id_, first_name, last_name=None, username=None,
language_code=None, is_bot=None):
"""Initial instance.
:param id_: Unique identifier for this user or bot.
        :param first_name: User’s or bot’s first name.
        :param last_name: User’s or bot’s last name.
        :param username: User’s or bot’s username.
:param language_code: IETF language tag of the user's language.
:param is_bot: True, if this user is a bot.
"""
self.id = id_
self.first_name = first_name
self.last_name = last_name
self.username = username
self.language_code = language_code
self.is_bot = is_bot
def __str__(self):
return '{}(id:{id}, username:{username})'.format(
self.__class__.__name__,
**self.__dict__
)
@_replace_args('type')
class Chat(User):
"""This class represents a chat."""
def __init__(self, type_, all_members_are_administrators=None, title=None,
photo=None, description=None, invite_link=None,
pinned_message=None, **kwargs):
"""Initial instance.
:param type_: Type of chat, can be either 'private', 'group',
'supergroup' or 'channel'.
:param all_members_are_administrators: True if a group has
'All Members Are Admins' enabled.
:param title: Title, for supergroups, channels and group chats.
:param photo: ChatPhoto. Returned only in get_chat.
:param description: Description, for supergroups and channel chats.
Returned only in get_chat.
:param invite_link: Chat invite link, for supergroups and channel
chats. Returned only in get_chat.
:param pinned_message: Pinned message, for supergroups and channel
chats. Returned only in get_chat.
:param kwargs: id, username, first_name, last_name.
"""
super(Chat, self).__init__(**kwargs)
self.type = type_
self.title = title
self.description = description
self.invite_link = invite_link
if pinned_message:
self.pinned_message = Message(**pinned_message)
if photo:
self.photo = ChatPhoto(**photo)
self.all_members_are_administrators = all_members_are_administrators
def __str__(self):
return '{}(id:{id}, type:{type})'.format(self.__class__.__name__,
**self.__dict__)
class ChatPhoto(_Content):
"""This class represents a chat photo."""
def __init__(self, small_file_id, big_file_id):
"""Initial instance.
:param small_file_id: Unique file identifier of small (160x160) chat
photo. This file_id can be used only for photo download.
:param big_file_id: Unique file identifier of big (640x640) chat photo.
This file_id can be used only for photo download.
"""
self.small_file_id = small_file_id
self.big_file_id = big_file_id
class Contact(User):
"""This class represents a phone contact."""
def __init__(self, phone_number, **kwargs):
"""Initial instance.
:param phone_number: Contact's phone number.
:param kwargs: user_id, first_name, last_name.
"""
kwargs['id'] = kwargs.pop('user_id', None)
super(Contact, self).__init__(**kwargs)
self.phone_number = phone_number
def __str__(self):
return '{}(id:{id}, phone_number:{phone_number})'.format(
self.__class__.__name__,
**self.__dict__
)
@_replace_args('from')
class Message(_Content):
"""This class represents a message."""
def __init__(self, message_id, date, chat, from_=None, forward_from=None,
forward_from_chat=None, forward_from_message_id=None,
forward_date=None, reply_to_message=None, edit_date=None,
text=None, entities=None, audio=None, document=None,
game=None, photo=None, sticker=None, video=None, voice=None,
caption=None, contact=None, location=None, venue=None,
new_chat_members=None, left_chat_member=None, video_note=None,
new_chat_title=None, new_chat_photo=None,
delete_chat_photo=None, group_chat_created=None,
channel_chat_created=None, migrate_to_chat_id=None,
migrate_from_chat_id=None, pinned_message=None, invoice=None,
successful_payment=None, author_signature=None,
forward_signature=None, caption_entities=None):
"""Initial instance.
:param message_id: Unique message identifier inside this chat.
:param date: Date the message was sent in Unix time.
:param chat: Conversation the message belongs to.
:param from_: Sender, can be empty for messages sent to channels.
:param forward_from: For forwarded messages, sender of the original
message.
:param forward_from_chat: For messages forwarded from a channel,
information about the original channel.
:param forward_from_message_id: For forwarded channel posts, identifier
of the original message in the channel.
:param forward_date: For forwarded messages, date the original message
was sent in Unix time.
:param reply_to_message: For replies, the original message. Note that
the Message object in this field will not contain further
reply_to_message fields even if it itself is a reply.
:param edit_date: Date the message was last edited in Unix time.
:param text: For text messages, the actual UTF-8 text of the message,
0-4096 characters.
:param entities: For text messages, special entities like usernames,
URLs, bot commands, etc. that appear in the text.
:param audio: Message is an audio file, information about the file.
:param document: Message is a general file, information about the file.
:param game: Message is a game, information about the game.
:param photo: Message is a photo, available sizes of the photo.
:param sticker: Message is a sticker, information about the sticker.
:param video: Message is a video, information about the video.
:param video_note: Message is a video note, information about the video
message.
:param voice: Message is a voice message, information about the file.
:param caption: Caption for the document, photo or video, 0-200
characters.
:param contact: Message is a shared contact, information about the
contact.
:param location: Message is a shared location, information about the
location.
:param venue: Message is a venue, information about the venue.
:param new_chat_members: New members that were added to the group or
supergroup and information about them (the bot itself may be one of
these members).
:param left_chat_member: A member was removed from the group,
information about them (this member may be the bot itself).
:param new_chat_title: A chat title was changed to this value.
        :param new_chat_photo: A chat photo was changed to this value.
:param delete_chat_photo: Service message: the chat photo was deleted.
:param group_chat_created: Service message: the group has been created.
        :param channel_chat_created: Service message: the channel has been
            created. This field can’t be received in a message coming through
            updates, because a bot can’t be a member of a channel when it is
            created. It can only be found in reply_to_message if someone
            replies to the very first message in a channel.
:param migrate_to_chat_id: The group has been migrated to a supergroup
with the specified identifier. This number may be greater than 32
bits and some programming languages may have difficulty/silent
defects in interpreting it. But it is smaller than 52 bits, so a
signed 64 bit integer or double-precision float type are safe for
storing this identifier.
:param migrate_from_chat_id: The supergroup has been migrated from a
group with the specified identifier. This number may be greater
than 32 bits and some programming languages may have
            difficulty/silent defects in interpreting it. But it is smaller
            than 52 bits, so a signed 64 bit integer or double-precision float
            type are safe for storing this identifier.
        """
<gh_stars>0
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NTS-Net adapted for perturbed top-k.
Based on the original PyTorch code
https://github.com/yangze0930/NTS-Net/blob/master/core/model.py
"""
import enum
import functools
import math
from typing import List, Tuple
from absl import app
from absl import flags
from absl import logging
import chex
from clu import platform
import einops
from flax.deprecated import nn
import jax
import jax.numpy as jnp
import ml_collections
import ml_collections.config_flags as config_flags
from off_the_grid.lib import data
from off_the_grid.lib import models
from off_the_grid.lib import utils
import off_the_grid.lib.classification_utils as classification_lib
from off_the_grid.lib.layers import sample_patches
from off_the_grid.lib.layers import transformer
import optax
import tensorflow as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", None, "Work unit directory.")
NUM_CLASSES = 200
ANCHORS_SETTINGS = (
dict(
layer="p3",
stride=32,
size=48,
scale=[2**(1. / 3.), 2**(2. / 3.)],
aspect_ratio=[0.667, 1, 1.5]), # Anchors 0-5
dict(
layer="p4",
stride=64,
size=96,
scale=[2**(1. / 3.), 2**(2. / 3.)],
aspect_ratio=[0.667, 1, 1.5]), # Anchors 6-11
dict(
layer="p5",
stride=128,
size=192,
scale=[1, 2**(1. / 3.), 2**(2. / 3.)],
aspect_ratio=[0.667, 1, 1.5]), # Anchors 12-20
)
class Communication(str, enum.Enum):
NONE = "none"
SQUEEZE_EXCITE_D = "squeeze_excite_d"
SQUEEZE_EXCITE_X = "squeeze_excite_x"
TRANSFORMER = "transformer"
def zeroone(scores, x_min, x_max):
"""Normalize values to lie between [0, 1]."""
return [(x - x_min) / (x_max - x_min + 1e-5) for x in scores]
class ProposalNet(nn.Module):
"""FPN inspired scorer module."""
def apply(self, x,
communication = Communication.NONE,
train = True):
"""Forward pass."""
batch_size = x.shape[0]
if communication is Communication.SQUEEZE_EXCITE_X:
x = sample_patches.SqueezeExciteLayer(x)
# end if squeeze excite x
d1 = nn.relu(nn.Conv(
x, 128, kernel_size=(3, 3), strides=(1, 1), bias=True, name="down1"))
d2 = nn.relu(nn.Conv(
d1, 128, kernel_size=(3, 3), strides=(2, 2), bias=True, name="down2"))
d3 = nn.relu(nn.Conv(
d2, 128, kernel_size=(3, 3), strides=(2, 2), bias=True, name="down3"))
if communication is Communication.SQUEEZE_EXCITE_D:
d1_flatten = einops.rearrange(d1, "b h w c -> b (h w) c")
d2_flatten = einops.rearrange(d2, "b h w c -> b (h w) c")
d3_flatten = einops.rearrange(d3, "b h w c -> b (h w) c")
nd1 = d1_flatten.shape[1]
nd2 = d2_flatten.shape[1]
d_together = jnp.concatenate([d1_flatten, d2_flatten, d3_flatten], axis=1)
num_channels = d_together.shape[-1]
y = d_together.mean(axis=1)
y = nn.Dense(y, features=num_channels // 4, bias=False)
y = nn.relu(y)
y = nn.Dense(y, features=num_channels, bias=False)
y = nn.sigmoid(y)
d_together = d_together * y[:, None, :]
# split and reshape
d1 = d_together[:, :nd1].reshape(d1.shape)
d2 = d_together[:, nd1:nd1+nd2].reshape(d2.shape)
d3 = d_together[:, nd1+nd2:].reshape(d3.shape)
elif communication is Communication.TRANSFORMER:
d1_flatten = einops.rearrange(d1, "b h w c -> b (h w) c")
d2_flatten = einops.rearrange(d2, "b h w c -> b (h w) c")
d3_flatten = einops.rearrange(d3, "b h w c -> b (h w) c")
nd1 = d1_flatten.shape[1]
nd2 = d2_flatten.shape[1]
d_together = jnp.concatenate([d1_flatten, d2_flatten, d3_flatten], axis=1)
positional_encodings = self.param(
"scale_ratio_position_encodings",
shape=(1,) + d_together.shape[1:],
initializer=jax.nn.initializers.normal(1. / d_together.shape[-1]))
d_together = transformer.Transformer(
d_together + positional_encodings,
num_layers=2,
num_heads=8,
is_training=train)
# split and reshape
d1 = d_together[:, :nd1].reshape(d1.shape)
d2 = d_together[:, nd1:nd1+nd2].reshape(d2.shape)
d3 = d_together[:, nd1+nd2:].reshape(d3.shape)
t1 = nn.Conv(
d1, 6, kernel_size=(1, 1), strides=(1, 1), bias=True, name="tidy1")
t2 = nn.Conv(
d2, 6, kernel_size=(1, 1), strides=(1, 1), bias=True, name="tidy2")
t3 = nn.Conv(
d3, 9, kernel_size=(1, 1), strides=(1, 1), bias=True, name="tidy3")
raw_scores = (jnp.split(t1, 6, axis=-1) +
jnp.split(t2, 6, axis=-1) +
jnp.split(t3, 9, axis=-1))
# The following is for normalization.
t = jnp.concatenate((jnp.reshape(t1, [batch_size, -1]),
jnp.reshape(t2, [batch_size, -1]),
jnp.reshape(t3, [batch_size, -1])), axis=1)
t_min = jnp.reshape(jnp.min(t, axis=-1), [batch_size, 1, 1, 1])
t_max = jnp.reshape(jnp.max(t, axis=-1), [batch_size, 1, 1, 1])
normalized_scores = zeroone(raw_scores, t_min, t_max)
stats = {
"scores": normalized_scores,
"raw_scores": t,
}
# removes the split dimension. scores are now b x h' x w' shaped
normalized_scores = [s.squeeze(-1) for s in normalized_scores]
return normalized_scores, stats
def extract_weighted_patches(x,
weights,
kernel,
stride,
padding):
"""Weighted average of patches using jax.lax.scan."""
logging.info("recompiling for kernel=%s and stride=%s and padding=%s", kernel,
stride, padding)
x = jnp.pad(x, ((0, 0),
(padding[0], padding[0] + kernel[0]),
(padding[1], padding[1] + kernel[1]),
(0, 0)))
batch_size, _, _, channels = x.shape
_, k, weights_h, weights_w = weights.shape
def accumulate_patches(acc, index_i_j):
i, j = index_i_j
patch = jax.lax.dynamic_slice(
x,
(0, i * stride[0], j * stride[1], 0),
(batch_size, kernel[0], kernel[1], channels))
weight = weights[:, :, i, j]
weighted_patch = jnp.einsum("bk, bijc -> bkijc", weight, patch)
acc += weighted_patch
return acc, None
indices = jnp.stack(
jnp.meshgrid(jnp.arange(weights_h), jnp.arange(weights_w), indexing="ij"),
axis=-1)
indices = indices.reshape((-1, 2))
init_patches = jnp.zeros((batch_size, k, kernel[0], kernel[1], channels))
patches, _ = jax.lax.scan(accumulate_patches, init_patches, indices)
return patches
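# Shape sanity-check sketch (illustrative values only). For x of shape
# (b, H, W, c) and per-location weights of shape (b, k, h', w'), the scan
# accumulates k weighted patch averages per example: (b, k, kernel_h,
# kernel_w, c).
def _demo_extract_weighted_patches():
  x = jnp.ones((2, 8, 8, 3))
  weights = jnp.ones((2, 4, 4, 4)) / 16.0  # uniform over a 4x4 score grid
  patches = extract_weighted_patches(
      x, weights, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
  return patches.shape  # -> (2, 4, 3, 3, 3)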
def weighted_anchor_aggregator(x, weights):
"""Given a tensor of weights per anchor computes the weighted average."""
counter = 0
all_sub_aggregates = []
for anchor_info in ANCHORS_SETTINGS:
stride = anchor_info["stride"]
size = anchor_info["size"]
for scale in anchor_info["scale"]:
for aspect_ratio in anchor_info["aspect_ratio"]:
kernel_size = (
int(size * scale / float(aspect_ratio) ** 0.5),
int(size * scale * float(aspect_ratio) ** 0.5))
padding = (
math.ceil((kernel_size[0] - stride) / 2.),
math.ceil((kernel_size[1] - stride) / 2.))
aggregate = extract_weighted_patches(
x, weights[counter], kernel_size, (stride, stride), padding)
aggregate = jnp.reshape(aggregate,
[-1, kernel_size[0], kernel_size[1], 3])
aggregate_224 = jax.image.resize(aggregate,
[aggregate.shape[0], 224, 224, 3],
"bilinear")
all_sub_aggregates.append(aggregate_224)
counter += 1
return jnp.sum(jnp.stack(all_sub_aggregates, axis=0), axis=0)
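# Bookkeeping sketch: ANCHORS_SETTINGS expands to 2*3 + 2*3 + 3*3 = 21 anchor
# shapes (matching the "Anchors 0-5 / 6-11 / 12-20" comments above), so
# weighted_anchor_aggregator expects `weights` to be a list of 21 tensors.
def _num_anchor_shapes():
  return sum(
      len(a["scale"]) * len(a["aspect_ratio"]) for a in ANCHORS_SETTINGS)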
class AttentionNet(nn.Module):
"""The complete NTS-Net model using perturbed top-k."""
def apply(self,
x,
config,
num_classes,
train = True):
"""Creates a model definition."""
b, c = x.shape[0], x.shape[3]
k = config.k
sigma = config.ptopk_sigma
num_samples = config.ptopk_num_samples
sigma *= self.state("sigma_mutiplier", shape=(),
initializer=nn.initializers.ones).value
stats = {"x": x, "sigma": sigma}
feature_extractor = models.ResNet50.shared(train=train, name="ResNet_0")
rpn_feature = feature_extractor(x)
rpn_scores, rpn_stats = ProposalNet(
jax.lax.stop_gradient(rpn_feature),
communication=Communication(config.communication),
train=train)
stats.update(rpn_stats)
# rpn_scores are a list of score images. We keep track of the structure
# because it is used in the aggregation step later-on.
rpn_scores_shapes = [s.shape for s in rpn_scores]
rpn_scores_flat = jnp.concatenate(
[jnp.reshape(s, [b, -1]) for s in rpn_scores], axis=1)
top_k_indicators = sample_patches.select_patches_perturbed_topk(
rpn_scores_flat,
k=k,
sigma=sigma,
num_samples=num_samples)
top_k_indicators = jnp.transpose(top_k_indicators, [0, 2, 1])
offset = 0
weights = []
for sh in rpn_scores_shapes:
cur = top_k_indicators[:, :, offset:offset + sh[1] * sh[2]]
cur = jnp.reshape(cur, [b, k, sh[1], sh[2]])
weights.append(cur)
offset += sh[1] * sh[2]
chex.assert_equal(offset, top_k_indicators.shape[-1])
part_imgs = weighted_anchor_aggregator(x, weights)
chex.assert_shape(part_imgs, (b * k, 224, 224, c))
stats["part_imgs"] = jnp.reshape(part_imgs, [b, k*224, 224, c])
part_features = feature_extractor(part_imgs)
part_features = jnp.mean(part_features, axis=[1, 2]) # GAP the spatial dims
part_features = nn.dropout( # features from parts
jnp.reshape(part_features, [b * k, 2048]),
0.5,
deterministic=not train,
rng=nn.make_rng())
features = nn.dropout( # features from whole image
jnp.reshape(jnp.mean(rpn_feature, axis=[1, 2]), [b, -1]),
0.5,
deterministic=not train,
rng=nn.make_rng())
# Mean pool all part features, add it to features and predict logits.
concat_out = jnp.mean(jnp.reshape(part_features, [b, k, 2048]),
axis=1) + features
concat_logits = nn.Dense(concat_out, num_classes)
raw_logits = nn.Dense(features, num_classes)
part_logits = jnp.reshape(nn.Dense(part_features, num_classes), [b, k, -1])
all_logits = {
"raw_logits": raw_logits,
"concat_logits": concat_logits,
"part_logits": part_logits,
}
# add entropy into it for entropy regularization.
stats["rpn_scores_entropy"] = jax.scipy.special.entr(
jax.nn.softmax(stats["raw_scores"])).sum(axis=1).mean(axis=0)
return all_logits, stats
def create_optimizer(config):
"""Creates the optimizer associated to a config."""
ops = []
# Gradient clipping either by norm `gradient_norm_clip` or by absolute value
# `gradient_value_clip`.
if "gradient_clip" in config:
raise ValueError("'gradient_clip' is deprecated, please use "
"'gradient_norm_clip'.")
assert not ("gradient_norm_clip" in config and
"gradient_value_clip" in config), (
"Gradient clipping by norm and by value are exclusive.")
if "gradient_norm_clip" in config:
ops.append(optax.clip_by_global_norm(config.gradient_norm_clip))
if "gradient_value_clip" in config:
ops.append(optax.clip(config.gradient_value_clip))
# Define the learning rate schedule.
schedule_fn = utils.get_optax_schedule_fn(
warmup_ratio=config.get("warmup_ratio", 0.),
num_train_steps=config.num_train_steps,
decay=config.get("learning_rate_step_decay", 1.0),
decay_at_steps=config.get("learning_rate_decay_at_steps", []),
cosine_decay_schedule=config.get("cosine_decay", False))
schedule_ops = [optax.scale_by_schedule(schedule_fn)]
# Scale some parameters matching a regex by a multiplier. Config field
# `scaling_by_regex` is a list of pairs (regex: str, multiplier: float).
scaling_by_regex = config.get("scaling_learning_rate_by_regex", [])
for regex, multiplier in scaling_by_regex:
logging.info("Learning rate is scaled by %f for parameters matching '%s'",
multiplier, regex)
schedule_ops.append(utils.scale_selected_parameters(regex, multiplier))
schedule_optimizer = optax.chain(*schedule_ops)
if "weight_decay_coupled" in config and config.weight_decay_coupled > 0.:
        # Applying the (nominally decoupled) weight decay transform before the
        # optimizer update makes it behave like coupled weight decay.
ops.append(utils.decoupled_weight_decay(
decay=config.weight_decay_coupled,
step_size_fn=lambda x: jnp.ones([], dtype=jnp.float32)))
if config.optimizer.lower() == "adam":
        optimizer = optax.adam(config.learning_rate)  # assumed: the source is truncated here
import base64
import logging
import os
import re
import subprocess
import tempfile
from datetime import datetime, time, date
from decimal import Decimal
from os.path import basename, join
from pathlib import Path
from typing import List, Optional
import pytz
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.template.loader import get_template
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from jacc.helpers import sum_queryset
from jacc.models import AccountEntry, AccountEntrySourceFile, Account, AccountEntryManager
from jbank.x509_helpers import get_x509_cert_from_file
from jutil.modelfields import SafeCharField, SafeTextField
from jutil.format import format_xml, get_media_full_path, choices_label
from jutil.validators import iban_validator, iban_bic, iso_payment_reference_validator, fi_payment_reference_validator
logger = logging.getLogger(__name__)
JBANK_BIN_PATH = Path(__file__).absolute().parent.joinpath("bin")
RECORD_ENTRY_TYPE = (
("1", _("Deposit")),
("2", _("Withdrawal")),
("3", _("Deposit Correction")),
("4", _("Withdrawal Correction")),
)
RECORD_CODES = (
("700", _("Money Transfer (In/Out)")),
("701", _("Recurring Payment (In/Out)")),
("702", _("Bill Payment (Out)")),
("703", _("Payment Terminal Deposit (In)")),
("704", _("Bank Draft (In/Out)")),
("705", _("Reference Payments (In)")),
("706", _("Payment Service (Out)")),
("710", _("Deposit (In)")),
("720", _("Withdrawal (Out)")),
("721", _("Card Payment (Out)")),
("722", _("Check (Out)")),
("730", _("Bank Fees (Out)")),
("740", _("Interests Charged (Out)")),
("750", _("Interests Credited (In)")),
("760", _("Loan (Out)")),
("761", _("Loan Payment (Out)")),
("770", _("Foreign Transfer (In/Out)")),
("780", _("Zero Balancing (In/Out)")),
("781", _("Sweeping (In/Out)")),
("782", _("Topping (In/Out)")),
)
RECORD_DOMAIN = (
("PMNT", _("Money Transfer (In/Out)")),
("LDAS", _("Loan Payment (Out)")),
("CAMT", _("Cash Management")),
("ACMT", _("Account Management")),
("XTND", _("Entended Domain")),
("SECU", _("Securities")),
("FORX", _("Foreign Exchange")),
("XTND", _("Entended Domain")),
("NTAV", _("Not Available")),
)
RECEIPT_CODE = (
("", ""),
("0", "(0)"),
("E", _("Separate")),
("P", _("Separate/Paper")),
)
CURRENCY_IDENTIFIERS = (("1", "EUR"),)
NAME_SOURCES = (
("", _("Not Set")),
("A", _("From Customer")),
("K", _("From Bank Clerk")),
("J", _("From Bank System")),
)
CORRECTION_IDENTIFIER = (
("0", _("Regular Entry")),
("1", _("Correction Entry")),
)
DELIVERY_METHOD_UNKNOWN = ""
DELIVERY_FROM_CUSTOMER = "A"
DELIVERY_FROM_BANK_CLERK = "K"
DELIVERY_FROM_BANK_SYSTEM = "J"
DELIVERY_METHOD = (
(DELIVERY_METHOD_UNKNOWN, ""),
(DELIVERY_FROM_CUSTOMER, _("From Customer")),
(DELIVERY_FROM_BANK_CLERK, _("From Bank Clerk")),
(DELIVERY_FROM_BANK_SYSTEM, _("From Bank System")),
)
PAYOUT_WAITING_PROCESSING = "W"
PAYOUT_WAITING_UPLOAD = "U"
PAYOUT_UPLOADED = "D"
PAYOUT_PAID = "P"
PAYOUT_CANCELED = "C"
PAYOUT_ERROR = "E"
PAYOUT_STATE = (
(PAYOUT_WAITING_PROCESSING, _("waiting processing")),
(PAYOUT_WAITING_UPLOAD, _("waiting upload")),
(PAYOUT_UPLOADED, _("uploaded")),
(PAYOUT_PAID, _("paid")),
(PAYOUT_CANCELED, _("canceled")),
(PAYOUT_ERROR, _("error")),
)
class Statement(AccountEntrySourceFile):
file = models.ForeignKey("StatementFile", blank=True, default=None, null=True, on_delete=models.CASCADE)
account = models.ForeignKey(Account, related_name="+", on_delete=models.PROTECT)
account_number = SafeCharField(_("account number"), max_length=32, db_index=True)
statement_identifier = SafeCharField(_("statement identifier"), max_length=48, db_index=True, blank=True, default="")
statement_number = models.SmallIntegerField(_("statement number"), db_index=True)
begin_date = models.DateField(_("begin date"), db_index=True)
end_date = models.DateField(_("end date"), db_index=True)
record_date = models.DateTimeField(_("record date"), db_index=True)
customer_identifier = SafeCharField(_("customer identifier"), max_length=64, blank=True, default="")
begin_balance_date = models.DateField(_("begin balance date"), null=True, blank=True, default=None)
begin_balance = models.DecimalField(_("begin balance"), max_digits=10, decimal_places=2)
record_count = models.IntegerField(_("record count"), null=True, default=None)
currency_code = SafeCharField(_("currency code"), max_length=3)
account_name = SafeCharField(_("account name"), max_length=32, blank=True, default="")
account_limit = models.DecimalField(_("account limit"), max_digits=10, decimal_places=2, blank=True, default=None, null=True)
owner_name = SafeCharField(_("owner name"), max_length=64)
contact_info_1 = SafeCharField(_("contact info (1)"), max_length=64, blank=True, default="")
contact_info_2 = SafeCharField(_("contact info (2)"), max_length=64, blank=True, default="")
bank_specific_info_1 = SafeCharField(_("bank specific info (1)"), max_length=1024, blank=True, default="")
iban = SafeCharField(_("IBAN"), max_length=32, db_index=True)
bic = SafeCharField(_("BIC"), max_length=11, db_index=True)
class Meta:
verbose_name = _("statement")
verbose_name_plural = _("statements")
class PaymentRecordManager(AccountEntryManager):
def filter_matched(self):
return self.exclude(child_set=None)
def filter_unmatched(self):
return self.filter(child_set=None)
class StatementRecord(AccountEntry):
objects: models.Manager = PaymentRecordManager() # type: ignore
statement = models.ForeignKey(Statement, verbose_name=_("statement"), related_name="record_set", on_delete=models.CASCADE)
line_number = models.SmallIntegerField(_("line number"), default=None, null=True, blank=True)
record_number = models.IntegerField(_("record number"), default=None, null=True, blank=True)
archive_identifier = SafeCharField(_("archive identifier"), max_length=64, blank=True, default="", db_index=True)
record_date = models.DateField(_("record date"), db_index=True)
value_date = models.DateField(_("value date"), db_index=True, blank=True, null=True, default=None)
paid_date = models.DateField(_("paid date"), db_index=True, blank=True, null=True, default=None)
entry_type = SafeCharField(_("entry type"), max_length=1, choices=RECORD_ENTRY_TYPE, db_index=True)
record_code = SafeCharField(_("record type"), max_length=4, choices=RECORD_CODES, db_index=True, blank=True)
record_domain = SafeCharField(_("record domain"), max_length=4, choices=RECORD_DOMAIN, db_index=True, blank=True)
family_code = SafeCharField(_("family code"), max_length=4, db_index=True, blank=True, default="")
sub_family_code = SafeCharField(_("sub family code"), max_length=4, db_index=True, blank=True, default="")
record_description = SafeCharField(_("record description"), max_length=128, blank=True, default="")
receipt_code = SafeCharField(_("receipt code"), max_length=1, choices=RECEIPT_CODE, db_index=True, blank=True)
delivery_method = SafeCharField(_("delivery method"), max_length=1, db_index=True, choices=DELIVERY_METHOD, blank=True)
name = SafeCharField(_("name"), max_length=64, blank=True, db_index=True)
name_source = SafeCharField(_("name source"), max_length=1, blank=True, choices=NAME_SOURCES)
recipient_account_number = SafeCharField(_("recipient account number"), max_length=32, blank=True, db_index=True)
recipient_account_number_changed = SafeCharField(_("recipient account number changed"), max_length=1, blank=True)
remittance_info = SafeCharField(_("remittance info"), max_length=35, db_index=True, blank=True)
messages = SafeTextField(_("messages"), blank=True, default="")
client_messages = SafeTextField(_("client messages"), blank=True, default="")
bank_messages = SafeTextField(_("bank messages"), blank=True, default="")
manually_settled = models.BooleanField(_("manually settled"), db_index=True, default=False, blank=True)
class Meta:
verbose_name = _("statement record")
verbose_name_plural = _("statement records")
@property
def is_settled(self) -> bool:
"""
True if entry is either manually settled or has SUM(children)==amount.
"""
return self.manually_settled or sum_queryset(self.child_set) == self.amount # type: ignore
def clean(self):
self.source_file = self.statement
self.timestamp = pytz.utc.localize(datetime.combine(self.record_date, time(0, 0)))
if self.name:
self.description = "{name}: {record_description}".format(record_description=self.record_description, name=self.name)
else:
self.description = "{record_description}".format(record_description=self.record_description)
class CurrencyExchangeSource(models.Model):
name = SafeCharField(_("name"), max_length=64)
created = models.DateTimeField(_("created"), default=now, db_index=True, blank=True, editable=False)
class Meta:
verbose_name = _("currency exchange source")
verbose_name_plural = _("currency exchange sources")
def __str__(self):
return str(self.name)
class CurrencyExchange(models.Model):
record_date = models.DateField(_("record date"), db_index=True)
source_currency = SafeCharField(_("source currency"), max_length=3, blank=True)
target_currency = SafeCharField(_("target currency"), max_length=3, blank=True)
unit_currency = SafeCharField(_("unit currency"), max_length=3, blank=True)
exchange_rate = models.DecimalField(_("exchange rate"), decimal_places=6, max_digits=12, null=True, default=None, blank=True)
source = models.ForeignKey(
CurrencyExchangeSource,
verbose_name=_("currency exchange source"),
blank=True,
null=True,
default=None,
on_delete=models.PROTECT,
) # noqa
class Meta:
verbose_name = _("currency exchange")
verbose_name_plural = _("currency exchanges")
def __str__(self):
return "{src} = {rate} {tgt}".format(src=self.source_currency, tgt=self.target_currency, rate=self.exchange_rate)
class StatementRecordDetail(models.Model):
record = models.ForeignKey(StatementRecord, verbose_name=_("record"), related_name="detail_set", on_delete=models.CASCADE)
batch_identifier = SafeCharField(_("batch message id"), max_length=64, db_index=True, blank=True, default="")
amount = models.DecimalField(verbose_name=_("amount"), max_digits=10, decimal_places=2, blank=True, default=None, null=True, db_index=True)
currency_code = SafeCharField(_("currency code"), max_length=3)
instructed_amount = models.DecimalField(
verbose_name=_("instructed amount"),
max_digits=10,
decimal_places=2,
blank=True,
default=None,
null=True,
db_index=True,
)
exchange = models.ForeignKey(
CurrencyExchange,
verbose_name=_("currency exchange"),
related_name="recorddetail_set",
on_delete=models.PROTECT,
null=True,
default=None,
blank=True,
)
archive_identifier = SafeCharField(_("archive identifier"), max_length=64, blank=True)
end_to_end_identifier = SafeCharField(_("end-to-end identifier"), max_length=64, blank=True)
creditor_name = SafeCharField(_("creditor name"), max_length=128, blank=True)
creditor_account = SafeCharField(_("creditor account"), max_length=35, blank=True)
creditor_account_scheme = SafeCharField(_("creditor account scheme"), max_length=8, blank=True)
debtor_name = SafeCharField(_("debtor name"), max_length=128, blank=True)
ultimate_debtor_name = SafeCharField(_("ultimate debtor name"), max_length=128, blank=True)
unstructured_remittance_info = SafeCharField(_("unstructured remittance info"), max_length=2048, blank=True)
paid_date = models.DateTimeField(_("paid date"), db_index=True, blank=True, null=True, default=None)
class Meta:
verbose_name = _("statement record details")
verbose_name_plural = _("statement record details")
class StatementRecordRemittanceInfo(models.Model):
detail = models.ForeignKey(StatementRecordDetail, related_name="remittanceinfo_set", on_delete=models.CASCADE)
additional_info = SafeCharField(_("additional remittance info"), max_length=256, blank=True, db_index=True)
amount = models.DecimalField(_("amount"), decimal_places=2, max_digits=10, null=True, default=None, blank=True)
currency_code = SafeCharField(_("currency code"), max_length=3, blank=True)
reference = SafeCharField(_("reference"), max_length=35, blank=True, db_index=True)
def __str__(self):
return "{} {} ref {} ({})".format(self.amount if self.amount is not None else "", self.currency_code, self.reference, self.additional_info)
class Meta:
verbose_name = _("statement record remittance info")
verbose_name_plural = _("statement record remittance info")
class StatementRecordSepaInfo(models.Model):
record = models.OneToOneField(StatementRecord, verbose_name=_("record"), related_name="sepa_info", on_delete=models.CASCADE)
reference = SafeCharField(_("reference"), max_length=35, blank=True)
iban_account_number = SafeCharField(_("IBAN"), max_length=35, blank=True)
bic_code = SafeCharField(_("BIC"), max_length=35, blank=True)
recipient_name_detail = SafeCharField(_("recipient name detail"), max_length=70, blank=True)
payer_name_detail = SafeCharField(_("payer name detail"), max_length=70, blank=True)
identifier = SafeCharField(_("identifier"), max_length=35, blank=True)
archive_identifier = SafeCharField(_("archive identifier"), max_length=64, blank=True)
class Meta:
verbose_name = _("SEPA")
verbose_name_plural = _("SEPA")
def __str__(self):
return "[{}]".format(self.id)
class ReferencePaymentBatchManager(models.Manager):
def latest_record_date(self) -> Optional[datetime]:
"""
:return: datetime of latest record available or None
"""
obj = self.order_by("-record_date").first()
if not obj:
return None
return obj.record_date
class ReferencePaymentBatch(AccountEntrySourceFile):
objects = ReferencePaymentBatchManager()
file = models.ForeignKey("ReferencePaymentBatchFile", blank=True, default=None, null=True, on_delete=models.CASCADE)
record_date = models.DateTimeField(_("record date"), db_index=True)
institution_identifier = SafeCharField(_("institution identifier"), max_length=2, blank=True)
service_identifier = SafeCharField(_("service identifier"), max_length=9, blank=True)
currency_identifier = SafeCharField(_("currency identifier"), max_length=3, choices=CURRENCY_IDENTIFIERS)
cached_total_amount = models.DecimalField(_("total amount"), max_digits=10, decimal_places=2, null=True, default=None, blank=True)
class Meta:
verbose_name = _("reference payment batch")
verbose_name_plural = _("reference payment batches")
def get_total_amount(self, force: bool = False) -> Decimal:
if self.cached_total_amount is None or force:
self.cached_total_amount = sum_queryset(ReferencePaymentRecord.objects.filter(batch=self))
self.save(update_fields=["cached_total_amount"])
return self.cached_total_amount
@property
def total_amount(self) -> Decimal:
return self.get_total_amount()
total_amount.fget.short_description = _("total amount") # type: ignore
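# Usage sketch (illustrative, not part of the original module): the batch total
# is computed once and cached on the row; pass force=True to recompute after
# records have changed.
#
#     batch = ReferencePaymentBatch.objects.order_by("-record_date").first()
#     total = batch.total_amount                   # cached after first access
#     total = batch.get_total_amount(force=True)   # recompute and re-cache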
class ReferencePaymentRecord(AccountEntry):
"""
Reference payment record. See jacc.Invoice for date/time variable naming conventions.
"""
objects = PaymentRecordManager() # type: ignore
batch = models.ForeignKey(ReferencePaymentBatch, verbose_name=_("batch"), related_name="record_set", on_delete=models.CASCADE)
line_number = models.SmallIntegerField(_("line number"), default=0, blank=True)
record_type = SafeCharField(_("record type"), max_length=1)
account_number = SafeCharField(_("account number"), max_length=32, db_index=True)
record_date = models.DateField(_("record date"), db_index=True)
paid_date = models.DateField(_("paid date"), db_index=True, blank=True, null=True, default=None)
archive_identifier = SafeCharField(_("archive identifier"), max_length=32, blank=True, default="", db_index=True)
remittance_info = SafeCharField(_("remittance info"), max_length=32, db_index=True)
payer_name = SafeCharField(_("payer name"), max_length=12, blank=True, default="", db_index=True)
currency_identifier = SafeCharField(_("currency identifier"), max_length=1, choices=CURRENCY_IDENTIFIERS)
name_source = SafeCharField(_("name source"), max_length=1, choices=NAME_SOURCES, blank=True)
correction_identifier = SafeCharField(_("correction identifier"), max_length=1, choices=CORRECTION_IDENTIFIER)
delivery_method = SafeCharField(_("delivery method"), max_length=1, db_index=True, choices=DELIVERY_METHOD, blank=True)
receipt_code = SafeCharField(_("receipt code"), max_length=1, choices=RECEIPT_CODE, db_index=True, blank=True)
manually_settled = models.BooleanField(_("manually settled"), db_index=True, default=False, blank=True)
class Meta:
verbose_name = _("reference payment records")
verbose_name_plural = _("reference payment records")
@property
def is_settled(self) -> bool:
"""
True if entry is either manually settled or has SUM(children)==amount.
"""
        return self.manually_settled or sum_queryset(self.child_set) == self.amount  # type: ignore
<reponame>subClassy/ners
import copy
import numpy as np
import pytorch3d
import torch
import torch.nn as nn
import trimesh
from pytorch3d.loss import mesh_laplacian_smoothing
from pytorch3d.structures import Meshes
from tqdm.auto import tqdm
import ners.utils.geometry as geom_util
from ners.utils import sample_consistent_points
def pretrain_template_uv(
template_uv,
verts=None,
faces=None,
extents=None,
num_iterations=1000,
num_samples=1000,
sphere_level=5,
device="cuda:0",
pbar=True,
):
"""
Pretrains the template UV shape model. Must be initialized either with vertices and
faces or with 3D cuboid extents.
    Args:
        template_uv (TemplateUV): template UV shape model to pretrain.
        verts (torch.Tensor): (N_v, 3) tensor of vertices.
        faces (torch.Tensor): (N_f, 3) tensor of faces.
        extents (list): list of 3D cuboid extents (w, h, d).
Returns:
template_uv (TemplateUV): pretrained template UV shape model mapping from uv
coordinates (..., 3) to 3D vertex coordinates (..., 3).
"""
template_uv = template_uv.to(device)
if verts is None:
tmesh = trimesh.creation.box(extents=extents)
verts = torch.tensor(tmesh.vertices, device=device).float()
faces = torch.tensor(tmesh.faces, device=device).long()
else:
verts = verts.to(device)
faces = faces.to(device)
verts_sphere = verts / verts.norm(dim=-1, keepdim=True)
optim = torch.optim.Adam(template_uv.parameters(), lr=0.001)
sphere_vs, sphere_fs = geom_util.create_sphere(level=sphere_level, device=device)
loop = tqdm(range(num_iterations)) if pbar else range(num_iterations)
for _ in loop:
optim.zero_grad()
targets, uvs = sample_consistent_points(
verts, faces, [verts, verts_sphere], num_samples
)
pred_vs = template_uv(uvs.to(device), normalize=True)
sv = (sphere_vs @ geom_util.random_rotation(device)).unsqueeze(0)
meshes = Meshes(template_uv(sv, normalize=True), sphere_fs.unsqueeze(0))
loss_reconstruction = torch.mean((pred_vs - targets.to(device)) ** 2)
loss_laplacian = mesh_laplacian_smoothing(meshes)
loss = 20 * loss_reconstruction + loss_laplacian
loss.backward()
optim.step()
loop.set_description(f"Template: {loss.item():.4f}")
return template_uv
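# Hypothetical usage sketch (illustrative values): pretrain a TemplateUV so the
# unit sphere maps onto a cuboid template. Assumes a CUDA device and the
# data/mean_latent.npz file loaded by TemplateUV (defined below).
def _demo_pretrain_cuboid_template():
    template = TemplateUV()
    return pretrain_template_uv(
        template, extents=[1.0, 0.6, 0.4], num_iterations=100, pbar=False)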
def shape_model_to_mesh(shape_model, sphere_level=4, textures=None):
device = shape_model.get_device(default_device="cuda:0")
sphere_vs, sphere_fs = geom_util.create_sphere(level=sphere_level, device=device)
if textures is None:
textures = pytorch3d.renderer.TexturesVertex((sphere_vs.unsqueeze(0) + 1) / 2)
mesh = Meshes(
[shape_model(sphere_vs)],
[sphere_fs],
textures=textures,
)
return mesh.to(device)
class HarmonicEmbedding(torch.nn.Module):
def __init__(self, n_harmonic_functions=10, omega0=0.1):
"""
Positional Embedding implementation (adapted from Pytorch3D).
Given an input tensor `x` of shape [minibatch, ... , dim],
the harmonic embedding layer converts each feature
in `x` into a series of harmonic features `embedding`
as follows:
embedding[..., i*dim:(i+1)*dim] = [
sin(x[..., i]),
sin(2*x[..., i]),
sin(4*x[..., i]),
...
sin(2**self.n_harmonic_functions * x[..., i]),
cos(x[..., i]),
cos(2*x[..., i]),
cos(4*x[..., i]),
...
cos(2**self.n_harmonic_functions * x[..., i])
]
        Note that `x` is also premultiplied by `omega0` before
        evaluating the harmonic functions.
"""
super().__init__()
self.register_buffer(
"frequencies",
omega0 * (2.0 ** torch.arange(n_harmonic_functions)),
)
def forward(self, x):
"""
Args:
x: tensor of shape [..., dim]
Returns:
embedding: a harmonic embedding of `x`
of shape [..., n_harmonic_functions * dim * 2]
"""
embed = (x[..., None] * self.frequencies).view(*x.shape[:-1], -1)
return torch.cat((embed.sin(), embed.cos()), dim=-1)
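# Quick shape sketch: with n_harmonic_functions=10, an input of shape (B, 3)
# maps to (B, 3 * 10 * 2) = (B, 60) -- sin and cos of 10 frequencies per
# input dimension.
def _demo_harmonic_embedding():
    emb = HarmonicEmbedding(n_harmonic_functions=10)
    return emb(torch.zeros(2, 3)).shape  # -> torch.Size([2, 60])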
class BaseNetwork(nn.Module):
def __init__(self, n_harmonic_functions=10, omega0=0.1):
super().__init__()
self.positional_encoding = HarmonicEmbedding(n_harmonic_functions, omega0)
def get_device(self, default_device=None):
"""
Returns which device the module is on. If wrapped in DataParallel, will return
the default device.
"""
try:
return next(self.parameters()).device
except StopIteration:
return default_device
class TemplateUV(BaseNetwork):
def __init__(
self, num_layers=3, input_size=3, output_size=3, hidden_size=256, L=10
):
input_size = (L * 2 * input_size) + 288
super().__init__(n_harmonic_functions=L)
latent_space = np.load('data/mean_latent.npz')
self.latent_space = torch.from_numpy(latent_space['arr_0']).unsqueeze(0).cuda()
self.latent_layers = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(256),
nn.Conv2d(256, 128, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 64, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 32, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Flatten()
)
layers = []
for i in range(num_layers - 1):
if i == 0:
layers.append(nn.Linear(input_size, hidden_size))
else:
layers.append(nn.Linear(hidden_size, hidden_size))
layers.append(nn.LayerNorm(hidden_size))
layers.append(nn.LeakyReLU())
layers.append(nn.Linear(hidden_size, output_size))
nn.init.xavier_uniform_(layers[-1].weight, gain=0.001)
nn.init.zeros_(layers[-1].bias)
self.mlp = nn.Sequential(*layers)
def forward(self, x, normalize=True):
temp_device = x.device
x = x.to(self.get_device(temp_device))
if normalize:
x = x / (x.norm(dim=-1, keepdim=True))
        # Reduce the fixed mean-latent feature map to a single 288-dim code and
        # broadcast it to every query point alongside the positional encoding.
        lt = self.latent_layers(self.latent_space).squeeze()
        h = self.positional_encoding(x)
        sh = list(h.shape)
        sh[-1] = 288
        ltt = torch.ones(sh)
        ltt[:] = lt  # broadcast the latent code across all points
ltt = ltt.to(self.get_device(temp_device))
h = torch.concat((h, ltt), -1)
h = self.mlp(h)
return (x + h).to(temp_device)
class DeltaUV(BaseNetwork):
def __init__(
self, num_layers=3, input_size=3, output_size=3, hidden_size=256, L=10
):
input_size = (L * 2 * input_size) + 288
super().__init__(n_harmonic_functions=L)
latent_space = np.load('data/mean_latent.npz')
self.latent_space = torch.from_numpy(latent_space['arr_0']).unsqueeze(0).cuda()
self.latent_layers = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(256),
nn.Conv2d(256, 128, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 64, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 32, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Flatten()
)
layers = []
for i in range(num_layers - 1):
if i == 0:
layers.append(nn.Linear(input_size, hidden_size))
else:
layers.append(nn.Linear(hidden_size, hidden_size))
layers.append(nn.LayerNorm(hidden_size))
layers.append(nn.LeakyReLU())
layers.append(nn.Linear(hidden_size, output_size))
nn.init.xavier_uniform_(layers[-1].weight, gain=0.001)
nn.init.zeros_(layers[-1].bias)
self.mlp = nn.Sequential(*layers)
def forward(self, x):
temp_device = x.device
x = x.to(self.get_device(temp_device))
lt = self.latent_layers(self.latent_space).squeeze()
h = self.positional_encoding(x)
sh = list(h.shape)
sh[-1] = 288
ltt = torch.ones((sh))
ltt[:] = lt
ltt = ltt.to(self.get_device(temp_device))
h = torch.concat((h, ltt), -1)
x = self.mlp(h)
return x.to(temp_device)
class ImplicitTextureNet(BaseNetwork):
def __init__(
self,
num_layers=8,
input_size=3,
hidden_size=256,
output_size=3,
L=10,
max_batch_size=10000,
output_activation="sigmoid",
gain=0.01,
):
"""
Texture prediction network mapping UV to RGB.
Args:
num_layers (int, optional): Number of layers. Defaults to 12.
input_size (int, optional): Dimension of input. Defaults to 3.
hidden_size (int, optional): Dimension of hidden layers. Defaults to 256.
output_size (int, optional): Dimension of output. Defaults to 3.
L (int, optional): Number of frequencies for positional encoding. Defaults
to 6.
max_batch_size (int, optional): Maximum batch size. If over, automatically
computes separate batches when using `forward_batched`. Defaults to
10000.
output_activation (str, optional): Output activation function can be
"sigmoid" if outputting RGB or "tanh" if outputting deltas. Defaults to
"sigmoid".
gain (float, optional): Gain for output activation to initialize near 0.5.
"""
input_size = (input_size * L * 2) + 288
super().__init__(n_harmonic_functions=L, omega0=0.1)
latent_space = np.load('data/mean_latent.npz')
self.latent_space = torch.from_numpy(latent_space['arr_0']).unsqueeze(0).cuda()
self.latent_layers = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(256),
nn.Conv2d(256, 128, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 64, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 32, kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Flatten()
)
self.num_layers = num_layers
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
        # Note: a single LayerNorm instance is appended at every depth below,
        # so its parameters are shared across all hidden layers.
        norm = nn.LayerNorm(hidden_size)
layers = [nn.Linear(input_size, hidden_size), norm, nn.LeakyReLU()]
for _ in range(num_layers - 2):
layers.append(nn.Linear(hidden_size, hidden_size))
layers.append(norm)
layers.append(nn.LeakyReLU())
layers.append(nn.Linear(hidden_size, output_size))
nn.init.xavier_uniform_(layers[-1].weight, gain=gain)
nn.init.zeros_(layers[-1].bias)
self.mlp = nn.Sequential(*layers)
if output_activation == "sigmoid":
self.final_activation = nn.Sigmoid()
elif output_activation == "tanh":
self.final_activation = nn.Tanh()
else:
raise Exception(
f"Final activation must be sigmoid or tanh. Got: {output_activation}."
)
self.max_batch_size = max_batch_size
def forward(self, x, normalize=True):
"""
Args:
x: (B,3)
Returns:
y: (B,3)
"""
shape = x.shape
x = x.reshape(-1, shape[-1])
# The points outside of the mesh also get passed into TexNet, which is a lot of
# unnecessary computation. We will skip over those points, which correspond to
# (0, 0, 0)
mask = torch.any(x != 0, dim=1)
x = x[mask]
temp_device = x.device
if torch.any(mask):
x = x.to(self.get_device(temp_device))
if normalize:
x = x / (x.norm(dim=-1, keepdim=True) + 1e-6) # Project to sphere.
lt = self.latent_layers(self.latent_space).squeeze()
h = self.positional_encoding(x)
sh = list(h.shape)
sh[-1] = 288
ltt = torch.ones((sh))
ltt[:] = lt
ltt = ltt.to(self.get_device(temp_device))
h = torch.concat((h, ltt), -1)
x = self.mlp(h)
x = self.final_activation(x)
x = x.to(temp_device)
y = torch.ones(len(mask), self.output_size, device=temp_device)
y[mask] = x
y = y.reshape(shape[:-1] + (-1,))
return y.float()
def forward_batched(self, x, batch_size=None, normalize=True):
"""
Computes forward pass using minibatches to reduce memory usage of forward pass.
Args:
x (B,3).
Returns:
y (B,3).
"""
n = x.shape[0]
b = self.max_batch_size if batch_size is None else batch_size
y = []
for i in range(0, n, b):
pred = self.forward(
x[i : i + b],
normalize=normalize,
)
y.append(pred)
return torch.cat(y, dim=0)
def save_model(self, name):
path = f"{name}.pth"
torch.save(self.state_dict(), path)
def load_model(self, name):
path = f"{name}.pth"
self.load_state_dict(torch.load(path))
def unwrap_uv_map(self, height=256, width=256, margin=0):
"""
Unwraps the tex_net into a UV Image.
Args:
tex_net (ImplicitTextureNet): Texture network mapping from spherical coordinates
to RGB.
height (int, optional): Height of UV image. Defaults to 256.
width (int, optional): Width of UV image. Defaults to 256.
margin (float, optional): Width of redundancy on right side. Defaults to 0
(no margin).
Returns:
tensor: Unwrapped texture (H, W, 3).
"""
theta = torch.linspace(0, np.pi, height)
phi = torch.linspace(-np.pi, np.pi * (1 + margin), width)
theta, phi = torch.meshgrid(theta, phi)
x, y, z = geom_util.spherical_to_cartesian(theta, phi)
coords = torch.dstack((x, y, z)).cuda()
shape = coords.shape[:2] + (3,)
pred_texture = self.forward(coords.reshape(-1, 3))
if pred_texture.shape[1] == 1:
# For single channel environment maps.
pred_texture = pred_texture.repeat(1, 3)
pred_texture = pred_texture.reshape(shape)
return pred_texture
def clone(self):
return copy.deepcopy(self)
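# Hypothetical usage sketch: bake a trained implicit texture into a UV image.
# The coordinate grid inside unwrap_uv_map is built with .cuda(), so a GPU is
# assumed, as elsewhere in this fork.
def _demo_unwrap_uv_map(tex_net):
    with torch.no_grad():
        uv_image = tex_net.unwrap_uv_map(height=128, width=256)
    return uv_image.cpu().numpy()  # (128, 256, 3), values in [0, 1]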
class EnvironmentMap(ImplicitTextureNet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_single_channel = True
def forward(self, x, normalize=True, **kwargs):
temp_device = x.device
x = x.to(self.get_device(temp_device))
if normalize:
x = x / (x.norm(dim=-1, keepdim=True) + 1e-6) # Project to sphere.
lt = self.latent_layers(self.latent_space).squeeze()
h = self.positional_encoding(x)
sh = list(h.shape)
        sh[-1] = 288
        # The tail below is assumed to follow the same latent-concat pattern as
        # ImplicitTextureNet.forward above (the source is truncated here).
        ltt = torch.ones(sh)
        ltt[:] = lt
        ltt = ltt.to(self.get_device(temp_device))
        h = torch.concat((h, ltt), -1)
        x = self.mlp(h)
        x = self.final_activation(x)
        return x.to(temp_device)
        self._dimension_type = dimension_type
@lazyproperty
def valid_elements(self) -> "_ValidElements":
"""_ValidElements object containing only non-missing elements."""
return _ValidElements(self._elements, self._dimension_transforms_dict)
@lazyproperty
def _element_dicts(self) -> List[Dict]:
"""Sequence of element-dicts for this dimension, taken from cube-result."""
return (
self._type_dict["categories"]
if self._type_dict["class"] == "categorical"
else self._type_dict["elements"]
)
@lazyproperty
def _elements(self) -> Tuple["_Element", ...]:
"""tuple storing actual sequence of element objects."""
return tuple(
_Element(
element_dict,
idx,
_ElementTransforms(element_transforms_dict),
)
for (
idx,
element_dict,
element_transforms_dict,
) in self._iter_element_makings()
)
@lazyproperty
def _elements_transforms(self) -> Dict:
"""Element transform dict expressed in the dimension transforms expression."""
return (
self._shimmed_element_transforms
if self._dimension_type == DT.MR
else self._dimension_transforms_dict.get("elements", {})
)
def _iter_element_makings(self) -> Iterator[Tuple[int, Dict, Dict]]:
"""Generate tuple of values needed to construct each element object.
An (idx, element_dict, element_transforms_dict) tuple is generated for each
element in this dimension, in the order they appear in the cube-result. All
elements are included (including missing).
"""
elements_transforms = self._elements_transforms
for idx, element_dict in enumerate(self._element_dicts):
# --- convert to string for categorical ids
element_id = element_dict["id"]
element_transforms_dict = elements_transforms.get(
element_id, elements_transforms.get(str(element_id), {})
)
yield idx, element_dict, element_transforms_dict
@lazyproperty
def _shimmed_element_transforms(self) -> Dict:
"""Element transforms dict for array dimensions.
To provide consistency with a poorly-defined interface for categorical
insertions, a client can include a `"hide": true` field in a (complete) copy of
a variable-based insertion in order to suppress that variable-based insertion.
For the array case, these need to be translated to a "hide" transform on the
subvariable element because such an insertion becomes a derived subvariable just
like the other subvariables in the dimension.
"""
# --- currently an inserted-subvariable can only be identified by name, there is
# --- no alias for an inserted-subvariable and it does not receive a "normal"
# --- element.id like "0001".
hidden_insertion_names = tuple(
insertion["name"]
for insertion in self._dimension_transforms_dict.get("insertions", [])
if insertion.get("hide", False)
)
# --- however, the hide-transform must be identified by element-id, so we need a
# --- mapping of insertion-name to element-id
element_id_from_name = {
element["value"]["id"]: element["id"] for element in self._element_dicts
}
# --- merge hide transforms with (a copy of) the existing element transforms ---
hidden_transforms = {
element_id_from_name[name]: {"hide": True}
for name in hidden_insertion_names
}
element_transforms = self._dimension_transforms_dict.get("elements", {})
return {**hidden_transforms, **element_transforms}
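# Hedged illustration (hypothetical payloads, not from a real cube response)
# of the translation performed by `_shimmed_element_transforms`: a hidden
# variable-based insertion named "Top 2" becomes a {"hide": True} element
# transform keyed by the corresponding subvariable's element id.
def _demo_shimmed_element_transforms():
    element_dicts = [
        {"id": "alias_a", "value": {"id": "0001"}},
        {"id": "alias_top2", "value": {"id": "Top 2"}},
    ]
    transforms_dict = {"insertions": [{"name": "Top 2", "hide": True}]}
    id_from_name = {e["value"]["id"]: e["id"] for e in element_dicts}
    hidden = {
        id_from_name[ins["name"]]: {"hide": True}
        for ins in transforms_dict.get("insertions", [])
        if ins.get("hide", False)
    }
    return {**hidden, **transforms_dict.get("elements", {})}
    # -> {"alias_top2": {"hide": True}}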
class _ValidElements(_BaseElements):
"""Sequence of non-missing element objects for a dimension.
*all_elements* is an instance of _AllElements containing all the elements
of a dimension. This object is only intended to be constructed by
_AllElements.valid_elements and there should be no reason to construct it
directly.
"""
def __init__(self, all_elements, dimension_transforms_dict):
self._all_elements = all_elements
self._dimension_transforms_dict = dimension_transforms_dict
@lazyproperty
def _elements(self) -> Tuple["_Element", ...]:
"""tuple containing actual sequence of element objects."""
return tuple(element for element in self._all_elements if not element.missing)
class _ElementIdShim:
"""Object used to replace element ids with alias for subvariables.
We want to move to a world where elements on a subvariables dimension are
identified by their alias, but right now the "element_id" from zz9 is
an index, and the transforms have several different ways to refer to
subvariables.
Types of identifiers for subvariables (and derived insertions):
* "element_id": Stored in the cube result as the object name in
`dimensions[i].type.elements[j].id`. For subvariables, zz9 currently puts
      the index integer here. Long term zz9 may change this to the alias.
* "subvariable_id": Subvariables have an id stored in
`dimensions[i].type.elements[j].value.id`, generally this is a 4 digit,
0-padded index of the subvariable when it was first created (eg "0001",
"0002", ...), though it is not required to be. For derived insertions,
currently the name is used here.
* "alias": Subvariables also have an alias that identifies them. It is stored
      in `dimensions[i].type.elements[j].value.references.alias`.
"""
def __init__(self, dimension_type, dimension_dict, dimension_transforms_dict):
self._dimension_type = dimension_type
self._dimension_dict = dimension_dict
self._dimension_transforms_dict = dimension_transforms_dict
@lazyproperty
def shimmed_dimension_dict(self) -> Dict:
"""Copy of dimension dictionary with shimmed `element_id`s
We want to move to a world where elements on a subvariables dimension are
identified by their alias, but right now the "element_id" from zz9 is
an index for subvariables.
"""
shim = copy.deepcopy(self._dimension_dict)
# --- Leave non-subvariable dimension types alone, as they don't have
# --- subvariable aliases to use, and category ids are already the main way
# --- we identify elements on categorical dimensions (and this is correct).
if self._dimension_type not in DT.ARRAY_TYPES:
return shim
# --- Replace element ids with the alias
for idx, alias in enumerate(self._subvar_aliases):
shim["type"]["elements"][idx]["id"] = alias
return shim
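    # Illustration (hypothetical data): a subvariables dimension whose
    # cube-result elements carry positional ids
    #     {"type": {"elements": [{"id": 0, ...}, {"id": 1, ...}]}}
    # is shimmed so each element id becomes the subvariable alias, e.g.
    #     {"type": {"elements": [{"id": "cat_1", ...}, {"id": "cat_2", ...}]}}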
@lazyproperty
def shimmed_dimension_transforms_dict(self) -> Dict:
"""Copy of dimension transforms dictionary with shimmed `element_id`s
We want to move to a world where elements on a subvariables dimension are
identified by their alias, but right now the "element_id" from zz9 is
        simply the subvariable's (unstable) cardinal position in the subvariables
sequence. Different parts of the transforms have several different ways
to refer to subvariables.
Types of identifiers for subvariables (and derived insertions):
- "element_id": Stored in the cube result as the object name in
`dimensions[i].type.elements[j].id`. For subvariables, zz9 currently puts
          the index integer here. Long term zz9 may change this to the alias.
- "subvariable_id": Subvariables have an id stored in
`dimensions[i].type.elements[j].value.id`, generally this is a 4 digit,
0-padded index of the subvariable when it was first created (eg "0001",
"0002", ...), though it is not required to be. For derived insertions,
currently the name is used here.
- "alias": Subvariables also have an alias that identifies them. It is stored
in `dimensions[i].type.elements[j].value.references.alias`.
"""
shim = copy.deepcopy(self._dimension_transforms_dict)
# --- Leave non-subvariable dimension types alone, as they don't have
# --- subvariable aliases to use, and category ids are already the main way
# --- we identify elements on categorical dimensions (and this is correct).
if self._dimension_type not in DT.ARRAY_TYPES:
return shim
# --- Replace element transform ids with the alias
if "elements" in shim:
shim["elements"] = self._replaced_element_transforms(shim["elements"])
# --- Translate explicit order element ids if present
if shim.get("order", {}).get("element_ids") is not None:
shim["order"]["element_ids"] = self._replaced_order_element_ids(
shim["order"]["element_ids"]
)
# --- sort-by-value on the opposing dimension also refers to element ids, but
# --- the ids refer to the opposing dimension, so do the translation later on.
# --- This is a little unfortunate, because this means that the ids in this shim
# --- version of the dimension transforms are inconsistent. But it feels easier
# --- than forcing the dimensions to be aware of other dimensions.
return shim
def translate_element_id(self, _id) -> Optional[str]:
"""Optional string that is the translation of various ids to subvariable alias
0) If dimension is not a subvariables dimension, return the _id.
1) If id matches an alias, then just use it.
2) If id matches an element id, translate to corresponding alias.
3) If id matches a subvariable id, translate to corresponding alias.
4) If id can be parsed to int and matches an element id, translate to alias.
5) If id is int (or can be parsed to int) and can be used as index (eg in range
0-# of elements), use _id'th alias.
6) If all of these fail, return None.
"""
if self._dimension_type not in DT.ARRAY_TYPES:
return _id
if _id in self._subvar_aliases:
return _id
if _id in self._raw_element_ids:
return self._subvar_aliases[self._raw_element_ids.index(_id)]
if _id in self._subvar_ids:
return self._subvar_aliases[self._subvar_ids.index(_id)]
try:
_id = int(_id)
# --- If successfully converted to int, try raw element ids again
if _id in self._raw_element_ids:
return self._subvar_aliases[self._raw_element_ids.index(_id)]
except ValueError:
return None
if _id >= 0 and _id < len(self._subvar_aliases):
return self._subvar_aliases[_id]
return None
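    # Illustrative resolution order for a hypothetical dimension with subvar
    # aliases ("a", "b"), raw element ids (0, 1) and subvariable ids
    # ("0001", "0002"):
    #     translate_element_id("a")    -> "a"   (already an alias)
    #     translate_element_id(1)      -> "b"   (raw element id)
    #     translate_element_id("0002") -> "b"   (subvariable id)
    #     translate_element_id("1")    -> "b"   (string that parses to a raw id)
    #     translate_element_id("zzz")  -> None  (no match)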
@lazyproperty
def _raw_element_ids(self) -> Tuple[Union[int, str], ...]:
"""tuple of int or string element ids, as they appear in cube result
These are "raw" because they refer to the element ids before they've been
replaced with the alias for subvariables in the `._shimmed_dimension_dict`.
"""
return tuple(
element["id"] for element in self._dimension_dict["type"]["elements"]
)
def _replaced_element_transforms(self, element_transforms) -> Dict:
"""Replace the dictionary keys of element transforms with aliases
The element transforms identify which element they refer to by their key in
the element_transforms object. Before it is shimmed, this can identify | |
# mundiapi/controllers/charges_controller.py
# -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from mundiapi.api_helper import APIHelper
from mundiapi.configuration import Configuration
from mundiapi.controllers.base_controller import BaseController
from mundiapi.http.auth.basic_auth import BasicAuth
from mundiapi.models.get_charge_response import GetChargeResponse
from mundiapi.models.list_charges_response import ListChargesResponse
from mundiapi.models.list_charge_transactions_response import ListChargeTransactionsResponse
from mundiapi.models.get_charges_summary_response import GetChargesSummaryResponse
class ChargesController(BaseController):
"""A Controller to access Endpoints in the mundiapi API."""
def update_charge_card(self,
charge_id,
request,
idempotency_key=None):
"""Does a PATCH request to /charges/{charge_id}/card.
Updates the card from a charge
Args:
charge_id (string): Charge id
request (UpdateChargeCardRequest): Request for updating a charge's
card
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges/{charge_id}/card'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'charge_id': charge_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8',
'idempotency-key': idempotency_key
}
# Prepare and execute request
_request = self.http_client.patch(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def update_charge_payment_method(self,
charge_id,
request,
idempotency_key=None):
"""Does a PATCH request to /charges/{charge_id}/payment-method.
Updates a charge's payment method
Args:
charge_id (string): Charge id
request (UpdateChargePaymentMethodRequest): Request for updating
the payment method from a charge
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges/{charge_id}/payment-method'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'charge_id': charge_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8',
'idempotency-key': idempotency_key
}
# Prepare and execute request
_request = self.http_client.patch(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def create_charge(self,
request,
idempotency_key=None):
"""Does a POST request to /Charges.
Creates a new charge
Args:
request (CreateChargeRequest): Request for creating a charge
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/Charges'
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8',
'idempotency-key': idempotency_key
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def get_charge(self,
charge_id):
"""Does a GET request to /charges/{charge_id}.
Get a charge from its id
Args:
charge_id (string): Charge id
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges/{charge_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'charge_id': charge_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def retry_charge(self,
charge_id,
idempotency_key=None):
"""Does a POST request to /charges/{charge_id}/retry.
Retries a charge
Args:
charge_id (string): Charge id
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges/{charge_id}/retry'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'charge_id': charge_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'idempotency-key': idempotency_key
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def get_charges(self,
page=None,
size=None,
code=None,
status=None,
payment_method=None,
customer_id=None,
order_id=None,
created_since=None,
created_until=None):
"""Does a GET request to /charges.
Lists all charges
Args:
page (int, optional): Page number
size (int, optional): Page size
code (string, optional): Filter for charge's code
status (string, optional): Filter for charge's status
payment_method (string, optional): Filter for charge's payment
method
customer_id (string, optional): Filter for charge's customer id
order_id (string, optional): Filter for charge's order id
created_since (datetime, optional): Filter for the beginning of
the range for charge's creation
created_until (datetime, optional): Filter for the end of the
range for charge's creation
Returns:
ListChargesResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges'
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_parameters = {
'page': page,
'size': size,
'code': code,
'status': status,
'payment_method': payment_method,
'customer_id': customer_id,
'order_id': order_id,
'created_since': APIHelper.when_defined(APIHelper.RFC3339DateTime, created_since),
'created_until': APIHelper.when_defined(APIHelper.RFC3339DateTime, created_until)
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ListChargesResponse.from_dictionary)
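    # Hedged usage sketch (the controller construction and response field
    # names are assumptions based on typical APIMATIC SDKs, not confirmed by
    # this file):
    #
    #     controller = ChargesController()
    #     paid = controller.get_charges(page=1, size=50, status="paid")
    #     # `paid` is a ListChargesResponse deserialized from the JSON body.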
def update_charge_metadata(self,
charge_id,
request,
idempotency_key=None):
"""Does a PATCH request to /Charges/{charge_id}/metadata.
Updates the metadata from a charge
Args:
charge_id (string): The charge id
request (UpdateMetadataRequest): Request for updating the charge
metadata
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/Charges/{charge_id}/metadata'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'charge_id': charge_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8',
'idempotency-key': idempotency_key
}
# Prepare and execute request
_request = self.http_client.patch(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def cancel_charge(self,
charge_id,
request=None,
idempotency_key=None):
"""Does a DELETE request to /charges/{charge_id}.
Cancel a charge
Args:
charge_id (string): Charge id
request (CreateCancelChargeRequest, optional): Request for
cancelling a charge
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges/{charge_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'charge_id': charge_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8',
'idempotency-key': idempotency_key
}
# Prepare and execute request
_request = self.http_client.delete(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetChargeResponse.from_dictionary)
def capture_charge(self,
charge_id,
request=None,
idempotency_key=None):
"""Does a POST request to /charges/{charge_id}/capture.
Captures a charge
Args:
charge_id (string): Charge id
request (CreateCaptureChargeRequest, optional): Request for
capturing a charge
idempotency_key (string, optional): TODO: type description here.
Example:
Returns:
GetChargeResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/charges/{charge_id}/capture'
        _url_path =
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import math
def visualize_box_mask(im, results, labels, threshold=0.5):
"""
Args:
im (str/np.ndarray): path of image/np.ndarray read by cv2
        results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of boxes,
                        matrix element: [class, score, x_min, y_min, x_max, y_max]
MaskRCNN's results include 'masks': np.ndarray:
shape:[N, im_h, im_w]
labels (list): labels:['class1', ..., 'classn']
threshold (float): Threshold of score.
Returns:
im (PIL.Image.Image): visualized image
"""
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
if 'masks' in results and 'boxes' in results and len(results['boxes']) > 0:
im = draw_mask(
im, results['boxes'], results['masks'], labels, threshold=threshold)
if 'boxes' in results and len(results['boxes']) > 0:
im = draw_box(im, results['boxes'], labels, threshold=threshold)
if 'segm' in results:
im = draw_segm(
im,
results['segm'],
results['label'],
results['score'],
labels,
threshold=threshold)
return im
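# Hedged usage sketch for the dispatcher above (the path and box layout are
# illustrative):
#
#     results = {"boxes": np.array([[0, 0.9, 10, 10, 50, 50]])}
#     vis = visualize_box_mask("demo.jpg", results, labels=["person"])
#     vis.save("demo_vis.jpg")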
def get_color_map_list(num_classes):
"""
Args:
num_classes (int): number of class
Returns:
color_map (list): RGB color list
"""
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
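# Worked example of the bit-interleaving above: each class id contributes one
# bit per 3-bit group to the R/G/B channels, filled from the MSB down, so
#     get_color_map_list(4) -> [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0]]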
def draw_mask(im, np_boxes, np_masks, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
        np_boxes (np.ndarray): shape:[N,6], N: number of boxes,
                               matrix element: [class, score, x_min, y_min, x_max, y_max]
np_masks (np.ndarray): shape:[N, im_h, im_w]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of mask
Returns:
im (PIL.Image.Image): visualized image
"""
color_list = get_color_map_list(len(labels))
w_ratio = 0.4
alpha = 0.7
im = np.array(im).astype('float32')
clsid2color = {}
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
np_masks = np_masks[expect_boxes, :, :]
for i in range(len(np_masks)):
clsid, score = int(np_boxes[i][0]), np_boxes[i][1]
mask = np_masks[i]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color_mask = clsid2color[clsid]
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
im[idx[0], idx[1], :] *= 1.0 - alpha
im[idx[0], idx[1], :] += alpha * color_mask
return Image.fromarray(im.astype('uint8'))
def draw_box(im, np_boxes, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
        np_boxes (np.ndarray): shape:[N,6], N: number of boxes,
                               matrix element: [class, score, x_min, y_min, x_max, y_max]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of box
Returns:
im (PIL.Image.Image): visualized image
"""
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
clsid2color = {}
color_list = get_color_map_list(len(labels))
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
for dt in np_boxes:
clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color = tuple(clsid2color[clsid])
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
print('class_id:{:d}, confidence:{:.4f}, left_top:[{:.2f},{:.2f}],'
'right_bottom:[{:.2f},{:.2f}]'.format(
int(clsid), score, xmin, ymin, xmax, ymax))
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=color)
elif len(bbox) == 8:
x1, y1, x2, y2, x3, y3, x4, y4 = bbox
draw.line(
[(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
width=2,
fill=color)
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
# draw label
text = "{} {:.4f}".format(labels[clsid], score)
tw, th = draw.textsize(text)
draw.rectangle(
[(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
return im
def draw_segm(im,
np_segms,
np_label,
np_score,
labels,
threshold=0.5,
alpha=0.7):
"""
Draw segmentation on image
"""
mask_color_id = 0
w_ratio = .4
color_list = get_color_map_list(len(labels))
im = np.array(im).astype('float32')
clsid2color = {}
np_segms = np_segms.astype(np.uint8)
for i in range(np_segms.shape[0]):
mask, score, clsid = np_segms[i], np_score[i], np_label[i]
if score < threshold:
continue
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color_mask = clsid2color[clsid]
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
idx0 = np.minimum(idx[0], im.shape[0] - 1)
idx1 = np.minimum(idx[1], im.shape[1] - 1)
im[idx0, idx1, :] *= 1.0 - alpha
im[idx0, idx1, :] += alpha * color_mask
sum_x = np.sum(mask, axis=0)
x = np.where(sum_x > 0.5)[0]
sum_y = np.sum(mask, axis=1)
y = np.where(sum_y > 0.5)[0]
x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
cv2.rectangle(im, (x0, y0), (x1, y1),
tuple(color_mask.astype('int32').tolist()), 1)
bbox_text = '%s %.2f' % (labels[clsid], score)
t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
cv2.rectangle(im, (x0, y0), (x0 + t_size[0], y0 - t_size[1] - 3),
tuple(color_mask.astype('int32').tolist()), -1)
cv2.putText(
im,
bbox_text, (x0, y0 - 2),
cv2.FONT_HERSHEY_SIMPLEX,
0.3, (0, 0, 0),
1,
lineType=cv2.LINE_AA)
return Image.fromarray(im.astype('uint8'))
def get_color(idx):
idx = idx * 3
color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
return color
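# Example: get_color(1) scales the id to idx = 3, giving
# ((37 * 3) % 255, (17 * 3) % 255, (29 * 3) % 255) == (111, 51, 87).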
def visualize_pose(imgfile,
results,
visual_thresh=0.6,
save_name='pose.jpg',
save_dir='output',
returnimg=False,
ids=None):
try:
import matplotlib.pyplot as plt
import matplotlib
plt.switch_backend('agg')
    except Exception as e:
        # `logger` is not defined in this module, so surface the error directly.
        raise ImportError(
            'Matplotlib not found, please install matplotlib, '
            'for example: `pip install matplotlib`.') from e
skeletons, scores = results['keypoint']
skeletons = np.array(skeletons)
kpt_nums = 17
if len(skeletons) > 0:
kpt_nums = skeletons.shape[1]
if kpt_nums == 17: #plot coco keypoint
EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
(7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14),
(13, 15), (14, 16), (11, 12)]
else: #plot mpii keypoint
EDGES = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 6), (3, 6), (6, 7), (7, 8),
(8, 9), (10, 11), (11, 12), (13, 14), (14, 15), (8, 12),
(8, 13)]
NUM_EDGES = len(EDGES)
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
cmap = matplotlib.cm.get_cmap('hsv')
plt.figure()
img = cv2.imread(imgfile) if type(imgfile) == str else imgfile
color_set = results['colors'] if 'colors' in results else None
if 'bbox' in results and ids is None:
bboxs = results['bbox']
for j, rect in enumerate(bboxs):
xmin, ymin, xmax, ymax = rect
            color = colors[0] if color_set is None else colors[color_set[j] % len(colors)]
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
canvas = img.copy()
for i in range(kpt_nums):
for j in range(len(skeletons)):
if skeletons[j][i, 2] < visual_thresh:
continue
if ids is None:
                color = colors[i] if color_set is None else colors[color_set[j] % len(colors)]
else:
color = get_color(ids[j])
cv2.circle(
canvas,
tuple(skeletons[j][i, 0:2].astype('int32')),
2,
color,
thickness=-1)
to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)
fig = matplotlib.pyplot.gcf()
stickwidth = 2
for i in range(NUM_EDGES):
for j in range(len(skeletons)):
edge = EDGES[i]
            if (skeletons[j][edge[0], 2] < visual_thresh
                    or skeletons[j][edge[1], 2] < visual_thresh):
continue
cur_canvas = canvas.copy()
X = [skeletons[j][edge[0], 1], skeletons[j][edge[1], 1]]
Y = [skeletons[j][edge[0], 0], skeletons[j][edge[1], 0]]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)),
(int(length / 2), stickwidth),
int(angle), 0, 360, 1)
if ids is None:
                color = colors[i] if color_set is None else colors[color_set[j] % len(colors)]
else:
color = get_color(ids[j])
cv2.fillConvexPoly(cur_canvas, polygon, color)
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
if returnimg:
return canvas
the roles, actions, obligations, responsibilities, and
implication of the agreement.
"""
resource_type = "ContractFriendly"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contentAttachment = None
""" Easily comprehended representation of this Contract.
Type `Attachment` (represented as `dict` in JSON). """
self.contentReference = None
""" Easily comprehended representation of this Contract.
Type `FHIRReference` referencing `['Composition', 'DocumentReference', 'QuestionnaireResponse']` (represented as `dict` in JSON). """
super(ContractFriendly, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ContractFriendly, self).elementProperties()
js.extend(
[
(
"contentAttachment",
"contentAttachment",
attachment.Attachment,
"Attachment",
False,
"content",
True,
),
(
"contentReference",
"contentReference",
fhirreference.FHIRReference,
"Reference",
False,
"content",
True,
),
]
)
return js
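# Note on the tuples above: generated fhirclient models appear to describe
# each property as (name, json_name, type, type_name, is_list, of_many,
# not_optional); here both content[x] variants are non-list members of the
# "content" choice group, and exactly one of the pair must be present.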
class ContractLegal(backboneelement.BackboneElement):
""" Contract Legal Language.
List of Legal expressions or representations of this Contract.
"""
resource_type = "ContractLegal"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contentAttachment = None
""" Contract Legal Text.
Type `Attachment` (represented as `dict` in JSON). """
self.contentReference = None
""" Contract Legal Text.
Type `FHIRReference` referencing `['Composition', 'DocumentReference', 'QuestionnaireResponse']` (represented as `dict` in JSON). """
super(ContractLegal, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ContractLegal, self).elementProperties()
js.extend(
[
(
"contentAttachment",
"contentAttachment",
attachment.Attachment,
"Attachment",
False,
"content",
True,
),
(
"contentReference",
"contentReference",
fhirreference.FHIRReference,
"Reference",
False,
"content",
True,
),
]
)
return js
class ContractRule(backboneelement.BackboneElement):
""" Computable Contract Language.
List of Computable Policy Rule Language Representations of this Contract.
"""
resource_type = "ContractRule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contentAttachment = None
""" Computable Contract Rules.
Type `Attachment` (represented as `dict` in JSON). """
self.contentReference = None
""" Computable Contract Rules.
Type `FHIRReference` referencing `['DocumentReference']` (represented as `dict` in JSON). """
super(ContractRule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ContractRule, self).elementProperties()
js.extend(
[
(
"contentAttachment",
"contentAttachment",
attachment.Attachment,
"Attachment",
False,
"content",
True,
),
(
"contentReference",
"contentReference",
fhirreference.FHIRReference,
"Reference",
False,
"content",
True,
),
]
)
return js
class ContractSigner(backboneelement.BackboneElement):
""" Contract Signatory.
Parties with legal standing in the Contract, including the principal
parties, the grantor(s) and grantee(s), which are any person or
organization bound by the contract, and any ancillary parties, which
facilitate the execution of the contract such as a notary or witness.
"""
resource_type = "ContractSigner"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.party = None
""" Contract Signatory Party.
Type `FHIRReference` referencing `['Organization', 'Patient', 'Practitioner', 'PractitionerRole', 'RelatedPerson']` (represented as `dict` in JSON). """
self.signature = None
""" Contract Documentation Signature.
List of `Signature` items (represented as `dict` in JSON). """
self.type = None
""" Contract Signatory Role.
Type `Coding` (represented as `dict` in JSON). """
super(ContractSigner, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ContractSigner, self).elementProperties()
js.extend(
[
(
"party",
"party",
fhirreference.FHIRReference,
"Reference",
False,
None,
True,
),
(
"signature",
"signature",
signature.Signature,
"Signature",
True,
None,
True,
),
("type", "type", coding.Coding, "Coding", False, None, True),
]
)
return js
class ContractTerm(backboneelement.BackboneElement):
""" Contract Term List.
One or more Contract Provisions, which may be related and conveyed as a
group, and may contain nested groups.
"""
resource_type = "ContractTerm"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" Entity being ascribed responsibility.
List of `ContractTermAction` items (represented as `dict` in JSON). """
self.applies = None
""" Contract Term Effective Time.
Type `Period` (represented as `dict` in JSON). """
self.asset = None
""" Contract Term Asset List.
List of `ContractTermAsset` items (represented as `dict` in JSON). """
self.group = None
""" Nested Contract Term Group.
List of `ContractTerm` items (represented as `dict` in JSON). """
self.identifier = None
""" Contract Term Number.
Type `Identifier` (represented as `dict` in JSON). """
self.issued = None
""" Contract Term Issue Date Time.
Type `FHIRDate` (represented as `str` in JSON). """
self.offer = None
""" Context of the Contract term.
Type `ContractTermOffer` (represented as `dict` in JSON). """
self.securityLabel = None
""" Protection for the Term.
List of `ContractTermSecurityLabel` items (represented as `dict` in JSON). """
self.subType = None
""" Contract Term Type specific classification.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.text = None
""" Term Statement.
Type `str`. """
self.topicCodeableConcept = None
""" Term Concern.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.topicReference = None
""" Term Concern.
Type `FHIRReference` referencing `['Resource']` (represented as `dict` in JSON). """
self.type = None
""" Contract Term Type or Form.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(ContractTerm, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ContractTerm, self).elementProperties()
js.extend(
[
(
"action",
"action",
ContractTermAction,
"ContractTermAction",
True,
None,
False,
),
("applies", "applies", period.Period, "Period", False, None, False),
(
"asset",
"asset",
ContractTermAsset,
"ContractTermAsset",
True,
None,
False,
),
("group", "group", ContractTerm, "ContractTerm", True, None, False),
(
"identifier",
"identifier",
identifier.Identifier,
"Identifier",
False,
None,
False,
),
("issued", "issued", fhirdate.FHIRDate, "dateTime", False, None, False),
(
"offer",
"offer",
ContractTermOffer,
"ContractTermOffer",
False,
None,
True,
),
(
"securityLabel",
"securityLabel",
ContractTermSecurityLabel,
"ContractTermSecurityLabel",
True,
None,
False,
),
(
"subType",
"subType",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
("text", "text", str, "string", False, None, False),
(
"topicCodeableConcept",
"topicCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"topic",
False,
),
(
"topicReference",
"topicReference",
fhirreference.FHIRReference,
"Reference",
False,
"topic",
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class ContractTermAction(backboneelement.BackboneElement):
""" Entity being ascribed responsibility.
An actor taking a role in an activity for which it can be assigned some
degree of responsibility for the activity taking place.
"""
resource_type = "ContractTermAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.context = None
""" Episode associated with action.
Type `FHIRReference` referencing `['Encounter', 'EpisodeOfCare']` (represented as `dict` in JSON). """
self.contextLinkId = None
""" Pointer to specific item.
List of `str` items. """
self.doNotPerform = None
""" True if the term prohibits the action.
Type `bool`. """
self.intent = None
""" Purpose for the Contract Term Action.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.linkId = None
""" Pointer to specific item.
List of `str` items. """
self.note = None
""" Comments about the action.
List of `Annotation` items (represented as `dict` in JSON). """
self.occurrenceDateTime = None
""" When action happens.
Type `FHIRDate` (represented as `str` in JSON). """
self.occurrencePeriod = None
""" When action happens.
Type `Period` (represented as `dict` in JSON). """
self.occurrenceTiming = None
""" When action happens.
Type `Timing` (represented as `dict` in JSON). """
self.performer = None
""" Actor that wil execute (or not) the action.
Type `FHIRReference` referencing `['RelatedPerson', 'Patient', 'Practitioner', 'PractitionerRole', 'CareTeam', 'Device', 'Substance', 'Organization', 'Location']` (represented as `dict` in JSON). """
self.performerLinkId = None
""" Pointer to specific item.
List of `str` items. """
self.performerRole = None
""" Competency of the performer.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.performerType = None
""" Kind of service performer.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reason = None
""" Why action is to be performed.
List of `str` items. """
self.reasonCode = | |
"""scrapli_replay.server.collector"""
from copy import copy
from dataclasses import asdict, dataclass
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from ruamel.yaml import YAML # type: ignore
from scrapli import Scrapli
from scrapli.channel.sync_channel import Channel
from scrapli.driver.core import EOSDriver
from scrapli.driver.network.sync_driver import NetworkDriver
from scrapli.exceptions import ScrapliConnectionError
from scrapli.helper import user_warning
from scrapli_replay.exceptions import ScrapliReplayException
from scrapli_replay.logging import logger
# pylint: disable=W0212
READ_DURATION = 180
@dataclass()
class StandardEvent:
# the actual stuff the channel outputs
channel_output: str
# the privilege level at the end of the event
result_privilege_level: str
    # whether the event returns a prompt; if False that would basically be like running a long command
# w/ paging still on, so we are stuck at --More-- prompt
returns_prompt: bool = True
# "exit" is a "standard" event, but it obviously can cause the connection to close, so for
# any event like that we'll set this to True, but of course it will default to False as that
# is the much more common/likely scenario
closes_connection: bool = False
# would be cool to add response delay -- i.e. device takes .04 seconds before spitting data out
    # when this command is run, could also add delay in the middle of a command, like it sputters
# while outputting data or something
@dataclass()
class InteractStep:
# the expected input, if an unexpected input occurs during an "interaction" we have
# to raise some error to a user like a device would
channel_input: str
# the actual stuff the channel outputs
channel_output: str
    # if the channel input is "hidden", like for password prompts
hidden_input: bool = False
    # whether the event returns a prompt; if False that would basically be like running a long command
# w/ paging still on, so we are stuck at --More-- prompt
returns_prompt: bool = True
@dataclass()
class InteractiveEvent:
# the privilege level at the end of the event
result_privilege_level: Optional[str] = None
# list of all of the "steps" in the interact event
event_steps: Optional[List[InteractStep]] = None
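# Hedged illustration (not part of scrapli_replay's public API): the event
# dataclasses serialize to plain dicts for the collector session file via
# `dataclasses.asdict`, imported above.
def _demo_event_as_dict() -> dict:
    event = StandardEvent(
        channel_output="show version\r\n...",
        result_privilege_level="privilege_exec",
    )
    # -> {"channel_output": "...", "result_privilege_level": "privilege_exec",
    #     "returns_prompt": True, "closes_connection": False}
    return asdict(event)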
class ScrapliCollectorChannel(Channel):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.captured_writes: List[str] = []
def write(self, channel_input: str, redacted: bool = False) -> None:
self.captured_writes.append(channel_input)
super().write(channel_input=channel_input, redacted=redacted)
class ScrapliCollector:
def __init__(
self,
channel_inputs: List[str],
interact_events: List[List[Tuple[str, str, bool]]],
paging_indicator: str,
paging_escape_string: str = "\x1b",
scrapli_connection: Optional[NetworkDriver] = None,
collector_session_filename: str = "scrapli_replay_collector_session.yaml",
**kwargs: Dict[str, Any],
) -> None:
"""
Scrapli Collector Class
Patches scrapli so that we can record the connection inputs and outputs from the channel
Args:
channel_inputs: list of channel inputs to record
interact_events: list of interact events to record
paging_indicator: string that indicates when the device is prompting for user input to
continue paging the output
paging_escape_string: string to use to escape the paging prompt
scrapli_connection: already instantiated scrapli connection -- you can pass this or just
the kwargs necessary to instantiate one for you
collector_session_filename: name of file to save collector session output to
kwargs: kwargs to instantiate scrapli connection, *must* include platform as this will
instantiate the connection via `Scrapli` factory class!
Returns:
None
Raises:
ScrapliReplayException: if no valid scrapli connection or connection data present
"""
logger.debug("creating scrapli replay collector")
self.channel_inputs = channel_inputs
self.interact_events = interact_events
self.paging_indicator = paging_indicator
self.paging_escape_string = paging_escape_string
self.collector_session_filename = collector_session_filename
self.channel_log = BytesIO()
# making the channel log unclose-able so we can retain the channel log even throughout
# connections being closed
self.channel_log.close = lambda: None # type: ignore
if scrapli_connection:
logger.debug("scrapli connection provided")
self.scrapli_connection = scrapli_connection
self.scrapli_connection._base_channel_args.channel_log = self.channel_log
if self.scrapli_connection.isalive():
# want to close it so we can reset the on open (paging stuff)
self.scrapli_connection.close()
else:
logger.debug("no scrapli connection provided, building one from kwargs")
if not kwargs.get("platform"):
msg = "must provide 'platform' as a kwarg if you dont provide a connection object!"
logger.critical(msg)
raise ScrapliReplayException(msg)
if kwargs.pop("channel_log", None):
user_warning(
title="Ignored argument!",
message="channel_log arg provided, replacing with ScrapliCollector channel_log",
)
self.scrapli_connection = Scrapli(
channel_log=self.channel_log,
**kwargs, # type: ignore
)
self.scrapli_connection_original_timeout_transport = (
self.scrapli_connection.timeout_transport
)
# update the channel to be an instance of the ScrapliCollectorChannel
self.scrapli_connection.channel = ScrapliCollectorChannel(
transport=self.scrapli_connection.transport,
base_channel_args=self.scrapli_connection._base_channel_args,
)
# store the "normal" default desired privilege level
self.scrapli_connection_default_desired_privilege_level = (
self.scrapli_connection.default_desired_privilege_level
)
# store and reset the on_open/on_close to None so we can manage when we want to disable
# paging and such
self.scrapli_connection_standard_on_open = self.scrapli_connection.on_open
self.scrapli_connection_standard_on_close = self.scrapli_connection.on_close
self.scrapli_connection.on_open = None
self.scrapli_connection.on_close = None
# bool to just indicate if we have ran the on open stuff
self.on_open_enabled = False
self.on_open_inputs: List[str] = []
self.on_close_inputs: List[str] = []
# flag to indicate if we have collected priv prompts yet
self.collected_priv_prompts = False
# Future: support recording any login auth/banner stuff too
platform_privilege_levels = self.scrapli_connection.privilege_levels.keys()
self.initial_privilege_level = ""
self.privilege_level_prompts: Dict[str, str] = {
privilege_level_name: "" for privilege_level_name in platform_privilege_levels
}
# commands captured from driver privilege levels for escalate/deescalate
self._privilege_escalate_inputs: List[str] = []
self._privilege_deescalate_inputs: List[str] = []
self._interact_privilege_escalations: List[List[Tuple[str, str, bool]]] = []
self.events: Dict[str, Dict[str, Dict[str, Union[StandardEvent, InteractiveEvent]]]] = {
privilege_level_name: {"pre_on_open": {}, "post_on_open": {}}
for privilege_level_name in platform_privilege_levels
}
self.dumpable_events: Dict[str, Dict[str, Dict[str, Any]]] = {
privilege_level_name: {"pre_on_open": {}, "post_on_open": {}}
for privilege_level_name in platform_privilege_levels
}
# this would be similar to the events but for an unknown input, like we have in the v2 thing
self.unknown_events: Dict[str, Dict[str, Optional[StandardEvent]]] = {
privilege_level_name: {"pre_on_open": None, "post_on_open": None}
for privilege_level_name in platform_privilege_levels
}
self.dumpable_unknown_events: Dict[str, Dict[str, Optional[Any]]] = {
privilege_level_name: {"pre_on_open": None, "post_on_open": None}
for privilege_level_name in platform_privilege_levels
}
# this is a list of all possible prompts -- because we are going to use send and expect we
# need to be able to expect any prompt OR the paging pattern... so after open and we collect
# the prompts for each priv level, we can build this list
self.all_expected_patterns = [self.paging_indicator]
self._determine_privilege_inputs()
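    # Hedged usage sketch (platform/host values are illustrative; kwargs are
    # passed through to the `Scrapli` factory when no connection is supplied):
    #
    #     collector = ScrapliCollector(
    #         channel_inputs=["show version"],
    #         interact_events=[],
    #         paging_indicator="--More--",
    #         platform="cisco_iosxe",
    #         host="192.0.2.1",
    #         auth_username="user",
    #         auth_password="password",
    #     )
    #     collector.open()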
def open(self) -> None:
"""
Open the Collector and the underlying scrapli connection
Args:
N/A
Returns:
None
Raises:
None
"""
self.scrapli_connection.open()
if not self.initial_privilege_level:
# only need to fetch this on the initial open, not for subsequent opens when we need
# to reconnect!
logger.debug(
"no initial privilege level set, must be first open... setting initial privilege "
"level"
)
self.initial_privilege_level = self._get_current_privilege_level_name()
def close(self) -> None:
"""
Close the Collector and the underlying scrapli connection
Args:
N/A
Returns:
None
Raises:
None
"""
self.scrapli_connection.close()
def _determine_privilege_inputs(self) -> None:
"""
Private method to figure out what the privilege escalation/deescalation inputs are
Args:
N/A
Returns:
None
Raises:
None
"""
logger.debug("building all privilege level inputs/interactions from scrapli driver")
self._privilege_escalate_inputs = [
priv.escalate
for priv in self.scrapli_connection.privilege_levels.values()
if not priv.escalate_auth and priv.escalate
]
self._privilege_deescalate_inputs = [
priv.deescalate
for priv in self.scrapli_connection.privilege_levels.values()
if priv.deescalate
]
interact_privilege_escalations_levels = [
priv
for priv in self.scrapli_connection.privilege_levels.values()
if priv.escalate_auth and priv.escalate_prompt
]
self._interact_privilege_escalations = [
[
(priv.escalate, priv.escalate_prompt, False),
("__AUTH_SECONDARY__", priv.pattern, True),
]
for priv in interact_privilege_escalations_levels
]
def _get_current_privilege_level_name(self, prompt: Optional[str] = None) -> str:
"""
Convenience method to fetch current privilege level name from the current prompt
Args:
prompt: prompt pattern to use, if not supplied, we'll fetch current prompt
Returns:
str: string name of current privilege level
Raises:
N/A
"""
if not prompt:
prompt = self.scrapli_connection.get_prompt()
priv_name: str = self.scrapli_connection._determine_current_priv(prompt)[0]
return priv_name
def _collect_privilege_prompts(self) -> None:
"""
Private method to get all of the prompts for each priv of the underlying device
Args:
N/A
Returns:
None
Raises:
None
"""
for priv_level in self.privilege_level_prompts:
logger.info(f"collecting prompt for priv level {priv_level}")
self.scrapli_connection.acquire_priv(priv_level)
self.privilege_level_prompts[priv_level] = self.scrapli_connection.get_prompt()
self.collected_priv_prompts = True
def _extend_all_expected_prompts(self) -> None:
"""
Extend the "all_expected_prompts" to include all the captured privilege level prompts
Args:
N/A
Returns:
None
Raises:
ScrapliReplayException: if privilege patterns aren't collected before running this
"""
if not self.collected_priv_prompts:
msg = (
"attempting to build all expected prompts pattern, but have not collected privilege"
" level prompts yet, failing"
)
logger.critical(msg)
raise ScrapliReplayException(msg)
self.all_expected_patterns.extend(
[prompt for _, prompt in self.privilege_level_prompts.items()]
)
@staticmethod
def _strip_remaining_ansi(raw_output: bytes) -> str:
"""
Strip remaining ansi chars and decode bytes to string
Unclear why as it seems | |
# sifraitech/eth2.0-specs
from random import Random
from eth2spec.test.context import (
spec_state_test, expect_assertion_error, always_bls, with_all_phases,
with_custom_state, spec_test, single_phase,
low_balances, misc_balances,
)
from eth2spec.test.helpers.attestations import sign_indexed_attestation
from eth2spec.test.helpers.attester_slashings import (
get_valid_attester_slashing, get_valid_attester_slashing_by_indices,
get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data,
)
from eth2spec.test.helpers.proposer_slashings import get_min_slashing_penalty_quotient
from eth2spec.test.helpers.state import (
get_balance,
next_epoch_via_block,
)
def run_attester_slashing_processing(spec, state, attester_slashing, valid=True):
"""
Run ``process_attester_slashing``, yielding:
- pre-state ('pre')
- attester_slashing ('attester_slashing')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
yield 'pre', state
yield 'attester_slashing', attester_slashing
if not valid:
expect_assertion_error(lambda: spec.process_attester_slashing(state, attester_slashing))
yield 'post', None
return
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
proposer_index = spec.get_beacon_proposer_index(state)
pre_proposer_balance = get_balance(state, proposer_index)
pre_slashing_balances = {slashed_index: get_balance(state, slashed_index) for slashed_index in slashed_indices}
pre_slashing_effectives = {
slashed_index: state.validators[slashed_index].effective_balance
for slashed_index in slashed_indices
}
    pre_withdrawable_epochs = {
        slashed_index: state.validators[slashed_index].withdrawable_epoch
        for slashed_index in slashed_indices
    }
total_proposer_rewards = sum(
effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
for effective_balance in pre_slashing_effectives.values()
)
# Process slashing
spec.process_attester_slashing(state, attester_slashing)
for slashed_index in slashed_indices:
        pre_withdrawable_epoch = pre_withdrawable_epochs[slashed_index]
slashed_validator = state.validators[slashed_index]
# Check slashing
assert slashed_validator.slashed
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
        if pre_withdrawable_epoch < spec.FAR_FUTURE_EPOCH:
            expected_withdrawable_epoch = max(
                pre_withdrawable_epoch,
spec.get_current_epoch(state) + spec.EPOCHS_PER_SLASHINGS_VECTOR
)
assert slashed_validator.withdrawable_epoch == expected_withdrawable_epoch
else:
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
assert get_balance(state, slashed_index) < pre_slashing_balances[slashed_index]
if proposer_index not in slashed_indices:
# gained whistleblower reward
assert get_balance(state, proposer_index) == pre_proposer_balance + total_proposer_rewards
else:
# gained rewards for all slashings, which may include others. And only lost that of themselves.
expected_balance = (
pre_proposer_balance
+ total_proposer_rewards
- pre_slashing_effectives[proposer_index] // get_min_slashing_penalty_quotient(spec)
)
assert get_balance(state, proposer_index) == expected_balance
yield 'post', state
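# Worked example of the bookkeeping above (illustrative numbers): one slashed
# validator at 32 ETH effective balance (32_000_000_000 Gwei) with the phase0
# WHISTLEBLOWER_REWARD_QUOTIENT of 512 yields a proposer reward of
# 32_000_000_000 // 512 == 62_500_000 Gwei.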
@with_all_phases
@spec_state_test
def test_success_double(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@spec_state_test
def test_success_surround(spec, state):
next_epoch_via_block(spec, state)
state.current_justified_checkpoint.epoch += 1
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
att_1_data = get_attestation_1_data(spec, attester_slashing)
att_2_data = get_attestation_2_data(spec, attester_slashing)
    # set attestation 1 to surround attestation 2
att_1_data.source.epoch = att_2_data.source.epoch - 1
att_1_data.target.epoch = att_2_data.target.epoch + 1
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
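# Recap of the surround condition exercised above: attestation 1 surrounds
# attestation 2 when source_1 < source_2 and target_2 < target_1, which the
# test constructs by widening attestation 1's epochs by one on each side.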
@with_all_phases
@spec_state_test
@always_bls
def test_success_already_exited_recent(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
for index in slashed_indices:
spec.initiate_validator_exit(state, index)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@spec_state_test
@always_bls
def test_success_proposer_index_slashed(spec, state):
    # Transition past the genesis slot because it generally doesn't have a proposer
next_epoch_via_block(spec, state)
proposer_index = spec.get_beacon_proposer_index(state)
attester_slashing = get_valid_attester_slashing_by_indices(
spec, state,
[proposer_index],
signed_1=True, signed_2=True,
)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@spec_state_test
def test_success_attestation_from_future(spec, state):
# Transition state to future to enable generation of a "future" attestation
future_state = state.copy()
next_epoch_via_block(spec, future_state)
# Generate slashing using the future state
attester_slashing = get_valid_attester_slashing(
spec, future_state,
slot=state.slot + 5, # Slot is in the future wrt `state`
signed_1=True, signed_2=True
)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_success_low_balances(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_success_misc_balances(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_success_with_effective_balance_disparity(spec, state):
# Jitter balances to be different from effective balances
rng = Random(12345)
for i in range(len(state.balances)):
pre = int(state.balances[i])
state.balances[i] += rng.randrange(max(pre - 5000, 0), pre + 5000)
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@spec_state_test
@always_bls
def test_success_already_exited_long_ago(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
for index in slashed_indices:
spec.initiate_validator_exit(state, index)
state.validators[index].withdrawable_epoch = spec.get_current_epoch(state) + 2
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
def test_same_data(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indexed_att_1 = attester_slashing.attestation_1
att_2_data = get_attestation_2_data(spec, attester_slashing)
indexed_att_1.data = att_2_data
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
def test_no_double_or_surround(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
att_1_data = get_attestation_1_data(spec, attester_slashing)
att_1_data.target.epoch += 1
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
def test_participants_already_slashed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
# set all indices to slashed
validator_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
for index in validator_indices:
state.validators[index].slashed = True
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_high_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
attester_slashing.attestation_1.attesting_indices = indices
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_high_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_2)
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
attester_slashing.attestation_2.attesting_indices = indices
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
attester_slashing.attestation_1.attesting_indices = []
attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
attester_slashing.attestation_2.attesting_indices = []
attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_all_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
attester_slashing.attestation_1.attesting_indices = []
attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY
attester_slashing.attestation_2.attesting_indices = []
attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_bad_extra_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
options = list(set(range(len(state.validators))) - set(indices))
indices.append(options[len(options) // 2]) # add random index, not previously in attestation.
attester_slashing.attestation_1.attesting_indices = sorted(indices)
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad extra index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_bad_replaced_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = attester_slashing.attestation_1.attesting_indices
options = list(set(range(len(state.validators))) - set(indices))
indices[3] = options[len(options) // 2] # replace with random index, not previously in attestation.
attester_slashing.attestation_1.attesting_indices = sorted(indices)
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad replaced index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_bad_extra_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = attester_slashing.attestation_2.attesting_indices
options = list(set(range(len(state.validators))) - set(indices))
indices.append(options[len(options) // 2]) # add random index, not previously in attestation.
attester_slashing.attestation_2.attesting_indices = sorted(indices)
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad extra index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_bad_replaced_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = attester_slashing.attestation_2.attesting_indices
options = list(set(range(len(state.validators))) - set(indices))
indices[3] = options[len(options) // 2] # replace with random index, not previously in attestation.
attester_slashing.attestation_2.attesting_indices = sorted(indices)
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad replaced index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_duplicate_index_normal_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indices = list(attester_slashing.attestation_1.attesting_indices)
indices.pop(1) # remove an index, make room for the additional duplicate index.
attester_slashing.attestation_1.attesting_indices = sorted(indices)
    # The signature will be valid for a single occurrence of each index, in case the
    # transition accidentally ignores the duplicate.
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
indices.append(indices[0]) # add one of the indices a second time
attester_slashing.attestation_1.attesting_indices = sorted(indices)
# it will just appear normal, unless the double index is spotted
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_duplicate_index_normal_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
indices = list(attester_slashing.attestation_2.attesting_indices)
indices.pop(2) # remove an index, make room for the additional duplicate index.
attester_slashing.attestation_2.attesting_indices = sorted(indices)
    # The signature will be valid for a single occurrence of each index, in case the
    # transition accidentally ignores the duplicate.
sign_indexed_attestation(spec, state, attester_slashing.attestation_2)
indices.append(indices[1]) # add one of the indices a second time
attester_slashing.attestation_2.attesting_indices = sorted(indices)
# it will just appear normal, unless the double index is spotted
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_duplicate_index_double_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indices = list(attester_slashing.attestation_1.attesting_indices)
indices.pop(1) # remove an index, make room for the additional duplicate index.
indices.append(indices[2]) # add one of the indices a second time
attester_slashing.attestation_1.attesting_indices = sorted(indices)
sign_indexed_attestation(spec, state, attester_slashing.attestation_1) # will have one attester signing it double
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_duplicate_index_double_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
indices = list(attester_slashing.attestation_2.attesting_indices)
indices.pop(1) # remove an index, make room for the additional duplicate index.
indices.append(indices[2]) # add one of the indices a second time
attester_slashing.attestation_2.attesting_indices = sorted(indices)
sign_indexed_attestation(spec, state, attester_slashing.attestation_2) # will have one attester signing it double
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_all_phases
@spec_state_test
def test_unsorted_att_1(spec, state):
    # Called when the user clicks on the column:
#-------------------------------------------------------------------------
def on_click(self, object):
""" Called when the user clicks on the column.
"""
pass
#-------------------------------------------------------------------------
# Called when the user double-clicks on the column:
#-------------------------------------------------------------------------
def on_dclick(self, object):
""" Called when the user clicks on the column.
"""
pass
#-------------------------------------------------------------------------
# Returns the result of comparing the column of two different objects:
#-------------------------------------------------------------------------
def cmp(self, object1, object2):
""" Returns the result of comparing the column of two different objects.
This is deprecated.
"""
return ((self.key(object1) > self.key(object2)) -
(self.key(object1) < self.key(object2)))
#-------------------------------------------------------------------------
# Returns the string representation of the table column:
#-------------------------------------------------------------------------
def __str__(self):
""" Returns the string representation of the table column.
"""
return self.get_label()
#-------------------------------------------------------------------------
# 'ObjectColumn' class:
#-------------------------------------------------------------------------
class ObjectColumn(TableColumn):
""" A column for editing objects.
"""
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
# Name of the object trait associated with this column:
name = Str
# Column label to use for this column:
label = Property
# Trait editor used to edit the contents of this column:
editor = Instance(EditorFactory)
# The editor style to use to edit the contents of this column:
style = EditorStyle
# Format string to apply to column values:
format = Str('%s')
# Format function to apply to column values:
format_func = Callable
#-------------------------------------------------------------------------
# Trait view definitions:
#-------------------------------------------------------------------------
traits_view = View([['name', 'label', 'type',
'|[Column Information]'],
['horizontal_alignment{Horizontal}@',
'vertical_alignment{Vertical}@',
'|[Alignment]'],
['editable', '9', 'droppable', '9', 'visible',
'-[Options]>'],
'|{Column}'],
[['text_color@', 'cell_color@',
'read_only_cell_color@',
'|[UI Colors]'],
'|{Colors}'],
[['text_font@',
'|[Font]<>'],
'|{Font}'],
['menu@',
'|{Menu}'],
['editor@',
'|{Editor}'])
#-------------------------------------------------------------------------
# Implementation of the 'label' property:
#-------------------------------------------------------------------------
def _get_label(self):
""" Gets the label of the column.
"""
if self._label is not None:
return self._label
return user_name_for(self.name)
def _set_label(self, label):
old, self._label = self._label, label
if old != label:
self.trait_property_changed('label', old, label)
#-------------------------------------------------------------------------
# Gets the value of the column for a specified object:
#-------------------------------------------------------------------------
def get_raw_value(self, object):
""" Gets the unformatted value of the column for a specified object.
"""
try:
return xgetattr(self.get_object(object), self.name)
except Exception as e:
from traitsui.api import raise_to_debug
raise_to_debug()
return None
def get_value(self, object):
""" Gets the formatted value of the column for a specified object.
"""
try:
if self.format_func is not None:
return self.format_func(self.get_raw_value(object))
return self.format % (self.get_raw_value(object), )
except:
logger.exception('Error occurred trying to format a %s value' %
self.__class__.__name__)
return 'Format!'
#-------------------------------------------------------------------------
# Returns the drag value for the column:
#-------------------------------------------------------------------------
def get_drag_value(self, object):
"""Returns the drag value for the column.
"""
return self.get_raw_value(object)
#-------------------------------------------------------------------------
# Sets the value of the column for a specified object:
#-------------------------------------------------------------------------
def set_value(self, object, value):
""" Sets the value of the column for a specified object.
"""
target, name = self.target_name(object)
setattr(target, name, value)
#-------------------------------------------------------------------------
# Gets the editor for the column of a specified object:
#-------------------------------------------------------------------------
def get_editor(self, object):
""" Gets the editor for the column of a specified object.
"""
if self.editor is not None:
return self.editor
target, name = self.target_name(object)
return target.base_trait(name).get_editor()
#-------------------------------------------------------------------------
# Gets the editor style for the column of a specified object:
#-------------------------------------------------------------------------
def get_style(self, object):
""" Gets the editor style for the column of a specified object.
"""
return self.style
#-------------------------------------------------------------------------
# Function that gets the value to sort by for a column
#-------------------------------------------------------------------------
def key(self, object):
""" Returns the value to use for sorting.
"""
return self.get_raw_value(object)
#-------------------------------------------------------------------------
# Returns whether a specified value is valid for dropping on the column
# for a specified object:
#-------------------------------------------------------------------------
def is_droppable(self, object, value):
""" Returns whether a specified value is valid for dropping on the
column for a specified object.
"""
if self.droppable:
try:
target, name = self.target_name(object)
target.base_trait(name).validate(target, name, value)
return True
except:
pass
return False
#-------------------------------------------------------------------------
# Returns the target object and name for the column:
#-------------------------------------------------------------------------
def target_name(self, object):
""" Returns the target object and name for the column.
"""
object = self.get_object(object)
name = self.name
col = name.rfind('.')
if col < 0:
return (object, name)
return (xgetattr(object, name[:col]), name[col + 1:])
#-------------------------------------------------------------------------
# 'ExpressionColumn' class:
#-------------------------------------------------------------------------
class ExpressionColumn(ObjectColumn):
""" A column for displaying computed values.
"""
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
# The Python expression used to return the value of the column:
expression = Expression
# Is this column editable?
editable = Constant(False)
# The globals dictionary that should be passed to the expression
# evaluation:
globals = Any({})
#-------------------------------------------------------------------------
# Gets the value of the column for a specified object:
#-------------------------------------------------------------------------
def get_raw_value(self, object):
""" Gets the unformatted value of the column for a specified object.
"""
try:
return eval(self.expression_, self.globals, {'object': object})
except Exception:
logger.exception('Error evaluating table column expression: %s' %
self.expression)
return None
#-------------------------------------------------------------------------
# 'NumericColumn' class:
#-------------------------------------------------------------------------
class NumericColumn(ObjectColumn):
""" A column for editing Numeric arrays.
"""
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
# Column label to use for this column
label = Property
# Text color this column when selected
selected_text_color = Color('black')
# Text font for this column when selected
selected_text_font = Font
# Cell background color for this column when selected
selected_cell_color = Color(0xD8FFD8)
# Formatting string for the cell value
format = Str('%s')
# Horizontal alignment of text in the column; this value overrides the
# default.
horizontal_alignment = 'center'
#-------------------------------------------------------------------------
# Implementation of the 'label' property:
#-------------------------------------------------------------------------
def _get_label(self):
""" Gets the label of the column.
"""
if self._label is not None:
return self._label
return self.name
def _set_label(self, label):
old, self._label = self._label, label
if old != label:
self.trait_property_changed('label', old, label)
#-------------------------------------------------------------------------
# Gets the type of data for the column for a specified object row:
#-------------------------------------------------------------------------
def get_type(self, object):
""" Gets the type of data for the column for a specified object row.
"""
return self.type
#-------------------------------------------------------------------------
# Returns the text color for the column for a specified object row:
#-------------------------------------------------------------------------
def get_text_color(self, object):
""" Returns the text color for the column for a specified object row.
"""
if self._is_selected(object):
return self.selected_text_color_
return self.text_color_
#-------------------------------------------------------------------------
# Returns the text font for the column for a specified object row:
#-------------------------------------------------------------------------
def get_text_font(self, object):
""" Returns the text font for the column for a specified object row.
"""
if self._is_selected(object):
return self.selected_text_font
return self.text_font
#-------------------------------------------------------------------------
# Returns the cell background color for the column for a specified object
# row:
#-------------------------------------------------------------------------
def get_cell_color(self, object):
""" Returns the cell background color for the column for a specified
object row.
"""
if self.is_editable(object):
if self._is_selected(object):
return self.selected_cell_color_
return self.cell_color_
return self.read_only_cell_color_
#-------------------------------------------------------------------------
# Returns the horizontal alignment for the column for a specified object
# row:
#-------------------------------------------------------------------------
def get_horizontal_alignment(self, object):
""" Returns the horizontal alignment for the column for a specified
object row.
"""
return self.horizontal_alignment
#-------------------------------------------------------------------------
# Returns the vertical alignment for the column for a specified object row:
#-------------------------------------------------------------------------
def get_vertical_alignment(self, object):
""" Returns the vertical alignment for the column for a specified
object row.
"""
return self.vertical_alignment
#-------------------------------------------------------------------------
# Returns whether the column is editable for a specified object row:
#-------------------------------------------------------------------------
def is_editable(self, object):
""" Returns whether the column is editable for a specified object row.
"""
return self.editable
#-------------------------------------------------------------------------
# Returns whether a specified value is valid for dropping on the column
# for a specified object row:
#-------------------------------------------------------------------------
def is_droppable(self, object, row, value):
""" Returns whether a specified value is valid for dropping on the
column for a specified object row.
"""
return self.droppable
#-------------------------------------------------------------------------
# Returns the context menu to display when the user right-clicks on the
# column for a specified object row:
#-------------------------------------------------------------------------
def get_menu(self, object, row):
""" Returns the context menu to display when the user right-clicks on
the column for a specified object row.
"""
return self.menu
#-------------------------------------------------------------------------
# Gets the value of the column for a specified object row:
#-------------------------------------------------------------------------
def get_value(self, object):
""" Gets the value of the column for a specified object row.
"""
try:
value = getattr(object, self.name)
try:
return self.format % (value, )
except:
return 'Format!'
except:
return 'Undefined!'
#-------------------------------------------------------------------------
# Sets the value of the column for a specified object row:
#-------------------------------------------------------------------------
def set_value(self, object, row, value):
""" | |
        # navselecttoolbar w/ interactive buttons
self.toolbar = NavMapToolbar(self.canvas, self.root,self)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
def replot_maps(self, elemlist, plotmaps, title):
''' destroy prior map widget and replace
general purpose 2D plotter for shiftmaps, amplmaps, elemmaps'''
print('Starting mapviewer plotmaps')
# generate xy list for lasso (same dimension as single map)
# dimension is # of rows then # of columns
# X is column number, Y is row number (reverse of above)
self.xys=[[i,j] for i in range(0,plotmaps[0].shape[0]) for j in range(0,plotmaps[0].shape[1])]
try:
self.canvas.get_tk_widget().destroy() # destroy previous plot
self.toolbar.destroy()
except:
pass
# Generate set of xys of dim^2 (used by lasso selections)
numcols=min(len(elemlist),2) # 1 or 2 columns
numrows=math.ceil(len(elemlist)/2)
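        # e.g. 5 selected elements -> numcols = 2, numrows = 3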
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
# self.figure.tight_layout()
self.ax=[]
for i, elem in enumerate(elemlist):
self.ax.append(self.figure.add_subplot(numrows,numcols,i+1))
self.ax[i].imshow(plotmaps[i])
#self.ax[i].format_coord = lambda x, y: "({2:f}, ".format(x) + "{2:f})".format(x)
self.figure.suptitle(title, fontsize=12)
self.canvas = FigureCanvasTkAgg(self.figure, self.root)
self.toolbar = NavMapToolbar(self.canvas, self.root,self)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
class SpectraViewer():
''' Spectral plotter window for extracted spectra '''
def __init__(self,root, parent):
self.root = root
self.parent = parent
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
self.canvas = FigureCanvasTkAgg(self.figure,self.root)
# just use standard toolbar
self.toolbar = NavigationToolbar2TkAgg(self.canvas,self.root)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
def plot_multiplex(self, extracted, energy, elemdata, currxy, **pkwargs):
''' Add variable number of subplots
called from GUIrois
extracted is 1D numpy array (same len as full multiplex) -- either deriv
or direct is passed (depending on toggle)
energy is full multiplex range ev vals for extracted spectrum (as list)
elemdata has peak stop/start indices for plots -- only active element subset
currxy is X, Y of extracted spectrum (or avg x,y of lassoed ROI) for text label
pkwargs
vals in pkwargs -- integ params (slope, intercept, peak) or deriv params
(negpeak, pospeak) for extra plot labeling
'''
# since # of subplots can change, need to destroy and recreate
try:
self.canvas.get_tk_widget().destroy() # destroy previous plot
self.toolbar.destroy()
except:
pass
plottype=pkwargs.get('type') # integ or deriv
vals=pkwargs.get('vals') # list with scatter points/backfits/etc.
# plot from elemdata[i][ holds indices
numcols=min(len(elemdata),2) # 1 or 2 columns
numrows=math.ceil(len(elemdata)/2)
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
self.ax=[]
for i, elemd in enumerate(elemdata):
self.ax.append(self.figure.add_subplot(numrows,numcols,i+1))
[lowind, junk]=elemd[3]
[junk,highind]=elemd[4]
idealev=elemd[8] # ideal peak eV
symbol=elemd[0] # name of element/peak
self.ax[i].plot(energy[lowind:highind], extracted[lowind:highind])
self.ax[i].axvline(x=idealev)
energy=[int(i) for i in energy] # ensure ints
# for deriv vals is list of dfs w/ scatter points
if plottype=='deriv':
# derxvals and deryvals passed np arrays to add pospeak/negpeak
# as scatter plot
[elem, xvals, yvals, ampl]=vals[i]
print('Vals are ', elem, xvals, yvals, ampl)
self.ax[i].scatter(xvals, yvals, color='r')
# add elem and amplitude as text label
tempstr=symbol+' Ampl:'+ "%.2f" % ampl
self.ax[i].set_title(tempstr, fontsize=10)
elif plottype=='integ':
# elem, peak energy (integration center), integcnts, slope/ intercept of backfit
[elem, peakev, integcnts, slope, intercept]=vals[i]
# Scatter point at integration center
yvals= extracted[energy.index(peakev)]
self.ax[i].scatter(peakev, yvals, color='r')
# Plot background fit line
x=np.linspace(min(energy[lowind:highind]), max(energy[lowind:highind]), 100)
print('Min/max are',min(energy[lowind:highind]), max(energy[lowind:highind]))
self.ax[i].plot(x, x*slope+intercept, color='r')
# add elem symbol and integcounts as subplot title
tempstr=symbol+' Integcnts:'+str(integcnts)
self.ax[i].set_title(tempstr, fontsize=10)
# label with integrat
# add vertical lines at ideal position
labelstr='Row: '+str(currxy[0])+' Column: '+str(currxy[1])
self.ax[0].text(0.05,0.95, labelstr, transform=self.ax[0].transAxes, fontsize=12)
# recreate and pack
self.canvas = FigureCanvasTkAgg(self.figure, self.root)
self.toolbar = NavigationToolbar2TkAgg(self.canvas,self.root)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
def label_quant(self, elems, vals):
''' Add quant text label with active elems and at. % values '''
if self.EDXfile is None:return
        # Build a combined text label from element symbols and at. % values
fullstr=''
for i, (elem,val) in enumerate(zip(elems, vals)):
tempstr=r'$%s_{%.0f}$' %(elem, float(val))
fullstr+=tempstr
# transform=ax.transAxes uses coords from 0 to 1 (instead of true x and y vals)
self.ax.text(0.05,0.95, fullstr, fontsize=30, verticalalignment='top', transform=self.ax.transAxes)
self.canvas.show()
class GUIrois():
''' Parent is GUImain, manages QMfile displayed in GUIplotter
handles element and plot selections '''
def __init__(self,root,parent):
self.root = root
self.parent = parent
# instance of QMfile local to the roi/opts window
self.QMfile = None
self.tkelems=[] # bools list for elem display or quant
self.activequant=[] # for at % results (on extracted spectrum)
self.showelems=False # toggle for showing elemental lines on plot
self.currxy = None # x,y of extracted spectrum (or avg x,y if mult pixels)
        self.togglederiv = False  # toggle between direct counts and s7d7 deriv plotting
# Element selector checkboxes
self.left_frame = tk.Frame(self.root)
self.elems_frame = tk.Frame(self.left_frame, pady=10)
self.elems_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.misc_frame = tk.Frame(self.left_frame, pady=10)
self.misc_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.left_frame.pack(side=tk.LEFT)
# Frame for display of counts/quant results (at right)
self.quant_frame = tk.Frame(self.root, pady=10)
self.quant_frame.pack(side=tk.LEFT,fill=tk.X,expand=1)
# Element presets (top of misc frame)
rowframe=tk.Frame(self.misc_frame)
tk.Button(rowframe, text='Clear all', command=self.clearall).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Button(rowframe, text='Select all', command=self.selectall).pack(side=tk.LEFT,fill=tk.X,expand=1)
rowframe.pack(fill=tk.X, expand=1)
# permanent buttons in misc_frame
rowframe=tk.Frame(self.misc_frame)
self.plottype=tk.StringVar()
tk.Radiobutton(rowframe, text='Shiftmap',value='Shiftmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Amplmap',value='Amplmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Integmap',value='Integmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Countsmax',value='Countsmax',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Elemmap',value='Elemmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Button(rowframe, text='Plot', command=self.plot_maps).pack(side=tk.LEFT,fill=tk.X,expand=1)
rowframe.pack(fill=tk.X, expand=1)
self.toggle_button = tk.Button(
self.misc_frame,text="Toggle deriv/direct",command=self.toggle_deriv)
self.toggle_button.pack(side=tk.TOP,fill=tk.X,expand=1)
self.label_button = tk.Button(
self.misc_frame,text="Label elements",command=self.label_elems)
self.label_button.pack(side=tk.TOP,fill=tk.X,expand=1)
self.quant_button = tk.Button(
self.misc_frame,text="Update quant", command=self.do_quant)
self.quant_button.pack(side=tk.TOP,fill=tk.X,expand=1)
def create_QMfile(self, directory):
''' Creates QM file instance (called from menu)
automatically finds pixarray and works from there '''
print('Creating QM file.')
self.QMfile = AESquantmap(directory)
print("QMfile created with name ", self.QMfile.uniquename)
#print("QMfile", QMfile.uniquename, "created.")
for child in self.elems_frame.winfo_children():
child.destroy()
# no direct pass to GUIplotter (only 2D projections)
# loads quant elements into elems frame
self.display_elems()
# go ahead and auto-load amplmaps, integmaps, etc if existing
def save_specimage(self):
''' Call AESquantmap save_specimage '''
if not self.QMfile:
return
self.QMfile.save_specimage()
def toggle_deriv(self):
''' Toggle plotting from direct counts plot to s7d7 smooth-deriv '''
if self.togglederiv==False:
self.togglederiv=True
else:
self.togglederiv=False
def selectall(self):
        ''' Select all elements '''
for i, tkbool in enumerate(self.tkelems):
self.tkelems[i].set(1)
def clearall(self):
''' Clear selected elements '''
for i, tkbool in enumerate(self.tkelems):
self.tkelems[i].set(0)
def load_maps(self):
        '''Menu/main launched '''
if self.QMfile is not None:
self.QMfile.load_maps()
def save_maps(self):
        '''Menu/main launched save of amplmaps, integmaps and shiftmaps '''
if self.QMfile is not None:
self.QMfile.save_maps()
def save_pixarray(self):
        '''Menu/main launched save of pixarray file (after linking with
        underlying data files)'''
if self.QMfile is not None:
self.QMfile.save_pixarray()
def save_ampl_images(self):
''' Save all extracted amplitude images as separate jpgs '''
if self.QMfile is not None:
self.QMfile.save_ampl_images()
def find_all_peaks(self):
''' Normal and best method for data extraction from specimage '''
if self.QMfile is not None:
self.QMfile.find_all_peaks()
def find_peaks(self):
''' For selected element(s), find peak center '''
# check if shiftmaps and amplitudes maps have already been saved
print('Running find_peaks in GUIroi')
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
self.QMfile.find_negpeaks(i)
print('Negpeak positions found for element', str(i))
def plot_multiplex(self):
''' Display current extracted spectrum in specviewer
shows only active elems '''
if self.QMfile.extracted is None: return
actelemdata=[]
vals=[] # for scatter points on spectral plots (active elems only)
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
actelemdata.append(self.QMfile.elemdata[i])
if self.togglederiv==False:
vals.append(self.QMfile.integparams[i])
else:
vals.append(self.QMfile.derivparams[i])
print("Plotting current extracted spectrum")
# pass current extracted spectrum or its deriv (1d np arr) and subset of active elem data info
if self.togglederiv==False: # plot direct
# pass integration center/ background fit for plotting
pkwargs={'type':'integ', 'vals':vals}
self.parent.specviewer.plot_multiplex(self.QMfile.extracted, self.QMfile.energy,
actelemdata, self.currxy, **pkwargs)
else: # plot deriv
# pass list of deriv params (xvals/yvals) for plot for each peak
pkwargs={'type':'deriv', 'vals':vals}
self.parent.specviewer.plot_multiplex(self.QMfile.extracts7d7, self.QMfile.energy,
actelemdata, self.currxy, **pkwargs)
def plot_maps(self):
''' Display 2D arrays of various types in mapviewer '''
activeelems=[]
plotmaps=[]
title=''
print('# of element vars is', len(self.tkelems))
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
if self.plottype.get()=='Shiftmap':
# Use togglederiv to decide between deriv shift and integ shift
                    print('This i is ', i)
if self.QMfile.shiftmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
if self.togglederiv: # use deriv based shift
plotmaps.append(self.QMfile.shiftmaps[i][:,:,0])
title='Peak shift deriv'
else: # use direct peak shift
plotmaps.append(self.QMfile.shiftmaps[i][:,:,1])
title='Peak shift direct'
elif self.plottype.get()=='Amplmap':
                    print('This i is ', i)
if self.QMfile.amplmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
# 0th layer is amplitude
plotmaps.append(self.QMfile.amplmaps[i][:,:, 0])
title='Peak amplitude'
elif self.plottype.get()=='Integmap':
                    print('This i is ', i)
                    if self.QMfile.integmaps[i] is not None:
CONTACTS_FOR_NO_MORE_DATA + 1, f'Observed {output_records} output records (expected {CONTACTS_FOR_NO_MORE_DATA + 1})'
assert error_records == 0, f'Observed {error_records} error records (expected 0)'
finally:
_clean_up(sdc_executor, pipeline, client, contact_ids)
@salesforce
def test_salesforce_destination_null_relationship(sdc_builder, sdc_executor, salesforce):
"""Test that we can clear related external ID fields (SDC-12704).
Only applicable to SOAP API as Bulk API does not allow this.
The pipeline looks like:
dev_raw_data_source >> salesforce_destination
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
"""
client = salesforce.client
inserted_ids = None
try:
# Using Salesforce client, create rows in Contact.
logger.info('Creating rows using Salesforce client ...')
DATA_TO_INSERT[0]["Email"] = f'{<EMAIL>'
DATA_TO_INSERT[1]["Email"]= f'{<EMAIL>}_<EMAIL>'
DATA_TO_INSERT[2]["Email"] = f'{STR_15_RANDOM}_<EMAIL>'
inserted_ids = _get_ids(client.bulk.Contact.insert(DATA_TO_INSERT), 'id')
# Link the records via ReportsToId
logger.info('Updating rows using Salesforce client ...')
data_for_update = [{'Id': inserted_ids[1]["Id"], 'ReportsToId': inserted_ids[0]["Id"]},
{'Id': inserted_ids[2]["Id"], 'ReportsToId': inserted_ids[1]["Id"]}]
client.bulk.Contact.update(data_for_update)
# Now disconnect the created contacts from each other
csv_data_to_insert = ['Id,ReportsTo.Email']
csv_data_to_insert.append(f'{inserted_ids[1]["Id"]},')
csv_data_to_insert.append(f'{inserted_ids[2]["Id"]},')
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = get_dev_raw_data_source(pipeline_builder, csv_data_to_insert)
salesforce_destination = pipeline_builder.add_stage('Salesforce', type='destination')
field_mapping = [{'sdcField': '/Id', 'salesforceField': 'Id'},
{'sdcField': '/ReportsTo.Email', 'salesforceField': 'ReportsTo.Email'}]
salesforce_destination.set_attributes(default_operation='UPDATE',
field_mapping=field_mapping,
sobject_type=CONTACT,
use_bulk_api=False)
dev_raw_data_source >> salesforce_destination
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
# Now the pipeline will make the contacts report to each other
logger.info('Starting Salesforce destination pipeline and waiting for it to produce records ...')
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Using Salesforce connection, read the contents in the Salesforce destination.
query_str = ("SELECT Id, Email, ReportsToId FROM Contact "
f'WHERE Email LIKE \'{STR_15_RANDOM}%\'')
result = client.query(query_str)
# Nobody should report to anybody any more
assert None == result['records'][0]['ReportsToId']
assert None == result['records'][1]['ReportsToId']
assert None == result['records'][2]['ReportsToId']
finally:
_clean_up(sdc_executor, pipeline, client, inserted_ids)
@salesforce
@pytest.mark.parametrize(('api'), [
'soap',
'bulk'
])
def test_salesforce_destination_polymorphic(sdc_builder, sdc_executor, salesforce, api):
"""Test that we can write to polymorphic external ID fields (SDC-13117).
Create a case, since its owner can be a user or a group.
The pipeline looks like:
dev_raw_data_source >> salesforce_destination
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
api (:obj:`str`): API to test: 'soap' or 'bulk'
"""
client = salesforce.client
case_id = None
try:
# Using Salesforce client, create a Case
logger.info('Creating rows using Salesforce client ...')
result = client.Case.create({'Subject': CASE_SUBJECT})
case_id = result['id']
# Set the case owner. Even though we're not changing the owner, SDC-13117 would cause an error to
# be thrown due to the bad syntax for the field name
csv_data_to_insert = ['Id,Owner']
csv_data_to_insert.append(f'{case_id},{salesforce.username}')
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = get_dev_raw_data_source(pipeline_builder, csv_data_to_insert)
salesforce_destination = pipeline_builder.add_stage('Salesforce', type='destination')
field_mapping = [{'sdcField': '/Id', 'salesforceField': 'Id'},
{'sdcField': '/Owner', 'salesforceField': 'User:Owner.Username'}]
salesforce_destination.set_attributes(default_operation='UPDATE',
field_mapping=field_mapping,
sobject_type='Case',
use_bulk_api=(api == 'bulk'))
dev_raw_data_source >> salesforce_destination
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
# Now the pipeline will update the Case
logger.info('Starting Salesforce destination pipeline and waiting for it to produce records ...')
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Using Salesforce connection, read the Case, just to check
query_str = (f"SELECT Id, Subject, Owner.Username FROM Case WHERE Id = '{case_id}'")
result = client.query(query_str)
assert 1 == len(result['records'])
assert case_id == result['records'][0]['Id']
assert CASE_SUBJECT == result['records'][0]['Subject']
assert salesforce.username == result['records'][0]['Owner']['Username']
finally:
if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':
logger.info('Stopping pipeline')
sdc_executor.stop_pipeline(pipeline)
logger.info('Deleting records ...')
if (case_id):
client.Case.delete(case_id)
@salesforce
@pytest.mark.parametrize(('api'), [
'soap',
'bulk'
])
def test_salesforce_datetime_in_history(sdc_builder, sdc_executor, salesforce, api):
"""Test SDC-12334 - field history data is untyped in the Salesforce schema, since OldValue and NewValue depend on
the field that changed. For some datatypes, the XML holds type information in an xmltype attribute. We were using
this to create the correct SDC field type, but not handling datetimes, throwing a FORCE_04 error.
ActivatedDate on Contract is one of the few datetime fields that will show up in a standard object's field history.
The pipeline looks like:
salesforce_origin >> trash
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
api (:obj:`str`): API to test: 'soap' or 'bulk'
"""
    client = salesforce.client
    # Pre-initialize so the finally block cannot hit an unbound name if creation fails
    acc = None
    con = None
    try:
# Create an account
acc = client.Account.create({'Name': str(uuid4())})
# Create a contract for that account
con = client.Contract.create({'AccountId': acc['id']})
# Update the contract status - this will have the side effect of updating ActivatedDate
client.Contract.update(con['id'], {'Status': 'Activated'})
query = f"SELECT Id, NewValue FROM ContractHistory WHERE Field = 'ActivatedDate' AND ContractId = '{con['id']}'"
pipeline_builder = sdc_builder.get_pipeline_builder()
salesforce_origin = pipeline_builder.add_stage('Salesforce', type='origin')
salesforce_origin.set_attributes(soql_query=query,
disable_query_validation=True,
use_bulk_api=(api == 'bulk'),
subscribe_for_notifications=False)
trash = pipeline_builder.add_stage('Trash')
salesforce_origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
logger.info('Starting pipeline and snapshot')
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True, timeout_sec=TIMEOUT).snapshot
# There should be a single row with Id and NewValue fields. For SOAP API, NewValue should be a DATETIME, for
# Bulk API it's a STRING
assert len(snapshot[salesforce_origin].output) == 1
assert snapshot[salesforce_origin].output[0].field['Id']
if api == 'soap':
assert snapshot[salesforce_origin].output[0].field['NewValue'].type == 'DATETIME'
else:
assert snapshot[salesforce_origin].output[0].field['NewValue'].type == 'STRING'
finally:
if con and con['id']:
client.Contract.delete(con['id'])
if acc and acc['id']:
client.Account.delete(acc['id'])
@salesforce
def test_salesforce_origin_query_cdc_no_object(sdc_builder, sdc_executor, salesforce):
"""Test SDC-12378 - enabling CDC with blank object name ('get notifications for all objects') was causing
initial query to fail.
Create data using Salesforce client and then check if Salesforce origin receives them using snapshot.
The pipeline looks like:
salesforce_origin >> trash
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
query = ("SELECT Id, FirstName, LastName, Email, LeadSource FROM Contact "
"WHERE Id > '000000000000000' AND "
f'LastName = \'{STR_15_RANDOM}\' '
"ORDER BY Id")
salesforce_origin = pipeline_builder.add_stage('Salesforce', type='origin')
salesforce_origin.set_attributes(soql_query=query,
subscribe_for_notifications=True,
subscription_type=CDC,
change_data_capture_object='')
trash = pipeline_builder.add_stage('Trash')
salesforce_origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
verify_by_snapshot(sdc_executor, pipeline, salesforce_origin, DATA_TO_INSERT, salesforce, DATA_TO_INSERT)
def find_dataset(client, name):
"""Utility method to find a dataset by name
Args:
client (:py:class:`simple_salesforce.Salesforce`): Salesforce client
name (:obj:`str`): Dataset name
Returns:
(:obj:`str`) Record ID of dataset
(:obj:`str`) Current Version ID of dataset
"""
result = client.restful('wave/datasets')
for dataset in result['datasets']:
if dataset['name'] == name and 'currentVersionId' in dataset:
return dataset['id'], dataset['currentVersionId']
return None, None
@salesforce
def test_einstein_analytics_destination(sdc_builder, sdc_executor, salesforce):
"""Basic test for Einstein Analytics destination. Write some data and check that it's there
The pipeline looks like:
dev_raw_data_source >> delay >> einstein_analytics_destination
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
"""
client = salesforce.client
id = None
try:
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = get_dev_raw_data_source(pipeline_builder, CSV_DATA_TO_INSERT)
# Delay so that we can stop the pipeline after a single batch is processed
delay = pipeline_builder.add_stage('Delay')
delay.delay_between_batches = 5*1000
analytics_destination = pipeline_builder.add_stage('Einstein Analytics', type='destination')
edgemart_alias = get_random_string(string.ascii_letters, 10).lower()
# Explicitly set auth credentials since Salesforce environment doesn't know about Einstein Analytics destination
analytics_destination.set_attributes(edgemart_alias=edgemart_alias,
username=salesforce.username,
password=<PASSWORD>,
auth_endpoint='test.salesforce.com')
dev_raw_data_source >> delay >> analytics_destination
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
# Now the pipeline will write data to Einstein Analytics
logger.info('Starting Einstein Analytics destination pipeline and waiting for it to produce records ...')
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Einstein Analytics data load is asynchronous, so poll until it's done
logger.info('Looking for dataset in Einstein Analytics')
end_time = datetime.now() + timedelta(seconds=60)
id = None
while id is None and datetime.now() < end_time:
sleep(5)
id, currentVersionId = find_dataset(client, edgemart_alias)
# Make sure we found a dataset and didn't time out!
        assert id is not None
# Now query the data from Einstein Analytics using SAQL
# Build the load statement
load = f'q = load \"{id}/{currentVersionId}\";'
# Build the identity projection - e.g.
# q = foreach q generate Email as Email, FirstName as FirstName, LastName as LastName, LeadSource as LeadSource;
field_list = []
for key in DATA_TO_INSERT[0]:
field_list.append(f'{key} as {key}')
projection = 'q = foreach q generate ' + ', '.join(field_list) + ';'
# Ensure consistent ordering
order_key = 'Email'
ordering = f'q = order q by {order_key};'
logger.info('Querying Einstein Analytics')
response = client.restful('wave/query', method='POST', json={'query': load + projection + ordering})
assert sorted(DATA_TO_INSERT, key=itemgetter(order_key)) == response['results']['records']
finally:
if id:
# simple_salesforce assumes there will be a JSON response,
# but DELETE returns 204 with no response
# See https://github.com/simple-salesforce/simple-salesforce/issues/327
try:
logger.info('Deleting dataset in Einstein Analytics')
client.restful(f'wave/datasets/{id}', method='DELETE')
except JSONDecodeError:
pass
def verify_cdc_snapshot(snapshot, stage, inserted_data):
# CDC returns more than just the record fields, so verify_snapshot isn't so useful
assert len(snapshot[stage].output) == 1
assert snapshot[stage].output[0].header.values['salesforce.cdc.recordIds']
assert snapshot[stage].output[0].field['Email'] == inserted_data['Email']
# CDC returns nested compound fields
assert snapshot[stage].output[0].field['Name']['FirstName'] == inserted_data['FirstName']
assert snapshot[stage].output[0].field['Name']['LastName'] == inserted_data['LastName']
@salesforce
@pytest.mark.parametrize(('subscription_type'), [
PUSH_TOPIC,
CDC
])
@pytest.mark.parametrize(('api'), [
| |
# Assignment 3: Combustor Design
## Introduction
The global desire to reduce greenhouse gas emissions is the main reason for the interest in the use of hydrogen for power generation.
Although hydrogen appears to be a promising solution, many challenges still need to be solved.
One of these challenges concerns the use of hydrogen as a fuel in gas turbines.
In gas turbines, hydrogen could replace natural gas as the fuel in the combustor. Unfortunately, this comes with a technical challenge involving an important property of premixed combustion: the flame speed. The flame speed of hydrogen is an order of magnitude higher than that of natural gas due to hydrogen's highly reactive nature. As a result, a hydrogen flame is more prone to flashback than a natural gas flame.
Flame flashback is the undesired upstream propagation of a flame into the premix section of a combustor. Flashback occurs when the flame speed is higher than the velocity of the incoming fresh mixture. This could cause severe equipment damage and turbine shutdown. Adjustments to traditional combustors are required in order to guarantee safe operation when using hydrogen as fuel.
To this end, the students are asked to investigate the use of hydrogen, natural gas, and a blend thereof in gas turbines. The first part will focus on the impact of the different fuels on the combustor geometry. Finally, we will take a closer look at the influence of the different fuels on the $CO_2$ and $NO_x$ emissions. For simplicity, it is assumed that natural gas consists purely of methane ($CH_4$).
## Tasks
### Diameter of the combustor
A gas turbine has a power output of 100 MW. The combustion section consists of 8 can combustors. Each can combustor is, for the sake of simplicity, represented by a tube with a diameter $D$.<br>
The inlet temperature $T_2$ of the compressor is 293 K and the inlet pressure $p_2$ is 101325 Pa. To prevent damage to the turbine blades, a turbine inlet temperature (TIT) of 1800 K is desired. Furthermore, assume that the specific heat of the fluid is constant through the compressor, with a heat capacity ratio $\gamma_c = 1.4$ (so that $c_{p,c} = \gamma_c R/(\gamma_c - 1) \approx 1005\ \mathrm{J\,kg^{-1}\,K^{-1}}$). The polytropic efficiencies of the compressor and turbine are 0.90 and 0.85, respectively.
The pressure ratio over the compressor will depend on your studentID:
PR = 10 if (numpy.mod(studentID, 2) + 1) == 1<br>
PR = 20 if (numpy.mod(studentID, 2) + 1) == 2
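A minimal sketch of this selection rule (the studentID value below is a hypothetical placeholder):

import numpy
studentID = 1234567  # hypothetical student number
PR = 10 if (numpy.mod(studentID, 2) + 1) == 1 else 20  # odd ID -> case 2 -> PR = 20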
Assume the TIT to be equal to the temperature of the flame inside the combustor. The flame temperature depends on the equivalence ratio ($\phi$), the hydrogen volume percentage of the fuel ($H_2\%$) and the combustor inlet temperature and pressure. For now consider the fuel to consist of pure natural gas ($H_2\%=0$). Note that the equivalence ratio is given by:
\begin{align}
\phi = \frac{\frac{m_{fuel}}{m_{air}}}{(\frac{m_{fuel}}{m_{air}})_{stoich}}
\end{align}
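As a worked example of this definition for pure methane, the stoichiometric fuel-air mass ratio follows directly from the reaction $CH_4 + 2(O_2 + 3.76 N_2)$, using rounded molar masses:

M_CH4, M_O2, M_N2 = 16.04, 32.00, 28.01      # kg/kmol
FAR_stoich = M_CH4/(2*(M_O2 + 3.76*M_N2))    # ~0.058 kg fuel per kg air
# phi = (m_fuel/m_air)/FAR_stoich, so phi = 0.5 corresponds to ~0.029 kg fuel per kg air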
**1. Calculate the inlet temperature $T_3$ and inlet pressure $p_3$ of the combustor and determine the required equivalence ratio (adjust PART A and PART B and run the code), so that the TIT specification is met.** <br>
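A minimal sketch of the compressor part of this task, assuming polytropic compression of a perfect gas and PR = 10 (the required equivalence ratio itself must come from the PART A/B simulation):

T2, p2 = 293.0, 101325.0
PR, gam_c, etap_c = 10, 1.4, 0.90
p3 = p2*PR                                    # combustor inlet pressure
T3 = T2*PR**((gam_c - 1.0)/(gam_c*etap_c))    # ~609 K combustor inlet temperature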
Inside the combustor the flow is turbulent. Turbulence causes an increase in the flame speed, so that the turbulent flame speed $S_T \approx 10 S_L$.
**2. With the equivalence ratio determined in the previous question, calculate the total mass flow rate ($\dot{m}_{tot}$) through the gas turbine and the maximal diameter $D$ of a single combustor tube, so that flashback is prevented. Adjust PART A, PART B, PART C and PART D in the code and run it again. Report the steps you have taken. <br>
Is there also a minimum diameter? If so, no calculation required, discuss what could be the reason for the necessity of a minimal diameter of the combustor tube.**
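A hedged sketch of the flashback criterion (every number below is an illustrative assumption, not the assignment's answer): the bulk velocity through a single tube must stay above the turbulent flame speed, which caps the tube diameter.

import math
p3, T3 = 10*101325.0, 609.0                          # assumed combustor inlet state from task 1
rho3 = p3/(287.0*T3)                                 # ideal-gas density, kg/m^3
mdot_tube = 25.0                                     # kg/s per can combustor, assumed
S_T = 10*0.4                                         # m/s, assuming S_L ~ 0.4 m/s from Cantera
D_max = math.sqrt(4*mdot_tube/(math.pi*rho3*S_T))    # ~1.2 m with these assumed numbers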
The combustion of methane is represented by the reaction: $CH_4 + 2 (O_2 + 3.76 N_2) \rightarrow CO_2 + 2 H_2O + 7.52 N_2$ <br>
**3. Use the above reaction equation and the definition of $\phi$ to find the mass flow rate of the fuel $\dot{m}_{fuel}$.** <br>
**4. Calculate the total heat input using $\dot{m}_{fuel}$ and calculate the efficiency of the complete cycle.** <br>
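A sketch of tasks 3 and 4 under clearly labeled assumptions (phi, FAR_stoich and the air mass flow stand in for your own results from tasks 1-2; the lower heating value of methane is roughly 50 MJ/kg):

phi, FAR_stoich = 0.45, 0.0584   # assumed results of tasks 1-2
mdot_air = 220.0                 # kg/s, assumed total air mass flow
mdot_fuel = phi*FAR_stoich*mdot_air
LHV_CH4 = 50.0e6                 # J/kg
Q_in = mdot_fuel*LHV_CH4         # total heat input, W
eta_cycle = 100e6/Q_in           # net power output over heat input, ~0.35 here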
**5. Repeat tasks 1-4 for a fuel consisting of $50\%H_2$/$50\%CH_4$ and $100\%H_2$. Discuss the effect of the addition of hydrogen to the fuel on the combustor geometry and cycle performance.**
### $CO_2$ and $NO_x$ emissions
**6. A gas turbine manufacturer claims that their gas turbines can be fired with a hydrogen content of 30%. Discuss whether this can be regarded as an achievement (use the top plot in Figure 5).**
**7. Consider an equivalence ratio $\phi=0.5$. Regarding emissions, discuss the advantages and disadvantages of increasing the hydrogen content of the fuel. Adjust PART A and use Figure 5.**
### Bonus assignment
For simplicity, it was assumed that natural gas consists of pure methane. In reality, it can be a mix of methane, higher hydrocarbons and nitrogen. <br>An example is Dutch Natural Gas (DNG), which consists of $80\%CH_4$, $5\%C_2H_6$ and $15\%N_2$.
**Repeat tasks 1-4 for a fuel consisting of $50\%H_2$/$50\%DNG$. <br> Hint1: Nitrogen does not participate in the reaction. <br> Hint2: This requires more adjustment of the code than just PARTS A, B, C, D.**
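For the bonus assignment, the stoichiometric oxygen demand of the blend can be built per species: ethane burns as $C_2H_6 + 3.5\,(O_2 + 3.76\,N_2) \rightarrow 2\,CO_2 + 3\,H_2O + 13.16\,N_2$, while the fuel-bound $N_2$ needs no oxygen. A sketch (volume fractions from the text; the derived quantities are assumptions consistent with it):

# Moles of O2 per mole of DNG (80% CH4, 5% C2H6, 15% N2 by volume)
M_C2H6 = 2*M_C + 6*M_H
nu_O2_DNG = 0.80*2.0 + 0.05*3.5 + 0.15*0.0 # = 1.775
M_DNG = 0.80*M_CH4 + 0.05*M_C2H6 + 0.15*M_N2
FAR_stoich_DNG = M_DNG / (nu_O2_DNG * (M_O2 + 3.76 * M_N2))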
## Code
Two properties of importance in this assignment are the laminar flame speed $S_L$ and the adiabatic flame temperature $T_{ad}$ of a mixture. These properties can be determined by solving the one-dimensional conservation equations for continuity, momentum, species and energy. Fortunately, we do not need to solve these equations by hand; instead, the chemical kinetics software Cantera is used to solve them by running a simulation. The simulation is illustrated in the sketch below. Keep in mind that the simulation can take some time to complete.
For more information about Cantera visit: https://cantera.org/. <br>
For more background information regarding the 1D flame simulation visit: https://cantera.org/science/flames.html
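A minimal, self-contained sketch of such a 1D freely-propagating flame in Cantera (the mechanism name `gri30.yaml` and the numerical settings are assumptions; newer Cantera versions expose the flame speed as `f.velocity[0]`, older ones as `f.u[0]`):

import cantera as ct
gas = ct.Solution('gri30.yaml') # GRI-Mech 3.0, valid for CH4/H2 blends
gas.TP = 600.0, 10 * 101325.0 # unburnt temperature [K] and pressure [Pa]
gas.set_equivalence_ratio(0.5, 'CH4:1', 'O2:0.21, N2:0.79')
f = ct.FreeFlame(gas, width=0.03) # 3 cm domain
f.set_refine_criteria(ratio=3, slope=0.07, curve=0.14)
f.solve(loglevel=0, auto=True)
S_L = f.velocity[0] # laminar flame speed [m/s]
T_ad = f.T[-1] # adiabatic flame temperature [K]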

#%% Load required packages
import sys
import cantera as ct
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
#%% Constants
R_gas_mol = 8314 # Universal gas constant [units: J*K^-1*kmol^-1]
R_gas_mass = 287 # specific gas constant of air [units: J*K^-1*kg^-1]
#%% Start
# Power output of turbine
power_output = 100 # units: MW
power_output*=1e6
# Compressor and turbine polytropic efficiencies (given in the assignment)
etap_c = 0.90
etap_t = 0.85
# Pressure ratio
PR = 10
# Compressor inlet temperature and pressure
T2 = 293.15 # units: K
p2 = 101325 # units: Pa
# Heat capacity ratio of air at T=293.15 K
gam_c = 1.4
# Compressor stage
# Specific heat capacity (heat capacity per unit mass) of mixture in compressor
cp_c = R_gas_mass*gam_c/(gam_c-1)
# cp_c = 1006 # units: J.kg^-1.K^-1
# cv_c = 717 # units: J.kg^-1.K^-1
# Molar mass of species [units: kg*kmol^-1]
M_H = 1.008
M_C = 12.011
M_N = 14.007
M_O = 15.999
M_H2 = M_H*2
M_CH4 = M_C + M_H*4
M_CO2 = M_C + M_O*2
M_O2 = M_O*2
M_N2 = M_N*2
# Define volume fractions of species in air [units: -]
f_O2 = 0.21
f_N2 = 0.79
########## PART A: ADJUST CODE HERE ##########
# Equivalence ratios
phis = [None, None, None] # Set equivalence ratios ranging from 0.4 to 0.8
# Hydrogen percentages
H2_percentages = [None, None, None] # Set hydrogen volume percentages of the fuel ranging from 0 to 100
################# END PART A ##################
# Define colors to make distinction between different mixtures based on hydrogen percentage
colors = cm.rainbow(np.linspace(0, 1, len(H2_percentages)))
#%% Premixed flame object
class mixture_class:
def __init__(self, phi, H2_percentage, T_u=293.15, p_u=101325):
# Color and label for plots
self.color = colors[H2_percentages.index(H2_percentage)]
self.label = str(int(H2_percentage)) + r'$\% H_2$'
# Temperature and pressure of the unburnt mixture
self.T_u = T_u # units: K
self.p_u = p_u # units: Pa
# Equivalence ratio
self.phi = phi
# Hydrogen percentage of fuel
self.H2_percentage = H2_percentage
# DNG percentage of fuel
self.CH4_percentage = 100 - self.H2_percentage
# Volume fractions of fuel
self.f_H2 = self.H2_percentage/100
self.f_CH4 = self.CH4_percentage/100
# Mass densities of fuel species
rho_H2 = M_H2*self.p_u/(self.T_u*R_gas_mol)
rho_CH4 = M_CH4*self.p_u/(self.T_u*R_gas_mol)
# Check if volume fractions of fuel and air are correct
check_air = f_O2 + f_N2
check_fuel = self.f_H2 + self.f_CH4
if abs(check_air - 1) > 1e-6 or abs(check_fuel - 1) > 1e-6:
    sys.exit('Volume fractions of air and fuel must each sum to one.')
# File: cw/skin/convert.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import struct
import threading
import cw
class Converter(threading.Thread):
def __init__(self, exe):
threading.Thread.__init__(self)
self.maximum = 100
self.curnum = 0
self.message = u"変換を開始しています..."
self.failure = False
self.complete = False
self.errormessage = ""
self.res = None
self.version = (1, 2, 8, 0)
self.init(exe)
def __del__(self):
self.dispose()
def dispose(self):
if self.res:
self.res.dispose()
def init(self, exe):
if self.res:
self.res.dispose()
self.res = None
self.exe = exe
if self.exe:
with open(self.exe, "rb") as f:
self.exebinary = f.read()
self.res = cw.skin.win32res.Win32Res(self.exe)
self.version = self.res.get_rcdata(cw.skin.win32res.RT_VERSION, 1)
self.version = struct.Struct("<HHHH").unpack(self.version[48:56])
self.version = (self.version[1], self.version[0], self.version[3], self.version[2])
self.datadir = self.find_datadir()
self.scenariodir = self.find_scenariodir()
self.yadodir = self.find_yadodir()
self.skintype = self.find_type()
self.initialcash = self.find_initialcash()
self.data = cw.data.xml2etree(u"Data/SkinBase/Skin.xml")
self.data.find("Property/Name").text = self.find_skinname()
self.data.find("Property/Type").text = self.skintype
self.data.find("Property/Author").text = self.find_author()
self.data.find("Property/Description").text = cw.util.encodewrap(self.find_description())
self.data.find("Property/InitialCash").text = str(self.initialcash)
self.actioncard = self._get_resources(u"ActionCard")
self.gameover = self._get_resources(u"GameOver")
self.scenario = self._get_resources(u"Scenario")
self.title = self._get_resources(u"Title")
self.yado = self._get_resources(u"Yado")
self.specialcard = self._get_resources(u"SpecialCard")
self._get_features()
self._get_sounds()
self._get_messages()
self._get_cards()
self.adventurersinn = None
self._get_bgs()
self.partyinfo_res = None
self._get_partyinfo()
def _get_resources(self, dpath):
dpath = cw.util.join_paths(u"Data/SkinBase/Resource/Xml/", dpath)
rsrc = {}
for path in os.listdir(dpath):
if path.lower().endswith(".xml"):
name = cw.util.splitext(path)[0]
path = cw.util.join_paths(dpath, path)
rsrc[name] = cw.data.xml2etree(path)
return rsrc
def _write_data(self, dpath, table):
for data in table.values():
data.fpath = cw.util.join_paths(dpath, cw.util.relpath(data.fpath, u"Data/SkinBase/"))
data.write()
def find_skinname(self):
if self.exe:
exebasename = os.path.basename(self.exe)
return cw.util.splitext(exebasename)[0]
else:
return "Default"
def find_description(self):
if self.exe:
exebasename = os.path.basename(self.exe)
return (u"%sをベースに自動生成したスキン。") % exebasename
else:
return u""
def find_datadir(self):
if self.exe and ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
key = "\\Midi\\DefReset.mid"
index = self.exebinary.find(key)
try:
return unicode(self.exebinary[index-4:index], cw.MBCS)
except:
pass
return u"Data"
def find_scenariodir(self):
if self.exe and ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
key = "\0\\\0\\\0\\Summary.wsm\0\\\0\\\0.wid\0"
index = self.exebinary.find(key)
try:
index = index + len(key)
return unicode(self.exebinary[index:index+8], cw.MBCS)
except:
pass
return u"Scenario"
def find_yadodir(self):
if self.exe and ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
key = "\\\0\\Environment.wyd\0"
index = self.exebinary.find(key)
try:
index = index + len(key)
return unicode(self.exebinary[index-len(key)-4:index-len(key)], cw.MBCS)
except:
pass
return u"Yado"
def find_type(self):
if self.exe:
fname = os.path.basename(self.exe).lower()
fname = cw.util.splitext(fname)[0]
if fname == "s_c_wirth":
return "School"
elif fname == "modernwirth":
return "Modern"
elif fname == "darkwirth":
return "Monsters"
elif fname == "oedowirth":
return "Oedo"
elif 0 <= os.path.dirname(self.exe).lower().find("sfv"):
return "ScienceFiction"
return u"MedievalFantasy"
def find_author(self):
return u""
def find_initialcash(self):
prop = cw.header.GetProperty(u"Data/SkinBase/Skin.xml")
cash = int(prop.properties.get(u"InitialCash", "4000"))
if not self.exe or not ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
return cash
if len(self.exebinary) < 0x31d97+4:
return cash
return struct.unpack("<I", self.exebinary[0x31d97:0x31d97+4])[0]
def _get_features(self):
# Search for the attribute values using binary fragments as landmarks.
if not self.exe or not ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
return
key = "TStatusItem\x81\x89" # "TStatusItem♂"
index = self.exebinary.find(key) + len(key) - len("\x81\x89")
physical = struct.Struct("<hhhhhh")
mental = struct.Struct("<hhhhh")
try:
def set_params(data, index, isnature, slist=("aggressive", "cautious", "brave", "cheerful", "trickish"), sperb=2.0):
# Attribute name
n = self.exebinary[index:index+20]
index += 20
i = n.find("\0")
if 0 <= i:
name = n[:i]
else:
name = n
data.find("./Name").text = unicode(name, cw.MBCS).strip(u" ")
# Physical abilities
p = physical.unpack(self.exebinary[index:index+2*6])
index += 2*6
e = data.find("./Physical")
if isnature:
e.set("dex", str(p[0] - 6))
e.set("agl", str(p[1] - 6))
e.set("int", str(p[2] - 6))
e.set("str", str(p[3] - 6))
e.set("vit", str(p[4] - 6))
e.set("min", str(p[5] - 6))
else:
e.set("dex", str(p[0]))
e.set("agl", str(p[1]))
e.set("int", str(p[2]))
e.set("str", str(p[3]))
e.set("vit", str(p[4]))
e.set("min", str(p[5]))
# Mental abilities
p = mental.unpack(self.exebinary[index:index+2*5])
index += 2*5
e = data.find("./Mental")
e.set(slist[0], str(p[0] / sperb))
e.set(slist[1], str(p[3] / sperb))
e.set(slist[2], str(p[2] / sperb))
e.set(slist[3], str(p[1] / sperb))
e.set(slist[4], str(p[4] / sperb))
return index
key = "\x00\x49\x4D\x41\x47\x45\x5F\x46\x41\x54\x48\x45\x52\x00\x49\x4D\x41\x47\x45\x5F\x4D\x4F\x54\x48\x45\x52\x00\x81\x40\x81\x40\x81\x40\x81\x40\x81\x40\x81\x40\x81\x40\x81\x40\x00\x00\x81\x51\x00\x81\x51\x00\x81\x51\x00\x81\x51\x00\x81\x40\x00"
index2 = self.exebinary.find(key)
if 0 <= index2:
index2 += len(key)
# "Skillful" coupon given to adult characters
skillful, index2 = self._get_text(index2)
e = self.data.find("Periods/Period[3]/Coupons/Coupon")
if not e is None:
e.text = skillful
# "Foxy" coupon given to elderly characters
foxy, index2 = self._get_text(index2)
e = self.data.find("Periods/Period[4]/Coupons/Coupon")
if not e is None:
e.text = foxy
for e in self.data.getfind("Sexes"):
index = set_params(e, index, False)
for e in self.data.getfind("Periods"):
index = set_params(e, index, False)
# Skip the unused age group "Elder"
index += 20 + 2*6 + 2*5
for e in self.data.getfind("Natures"):
index = set_params(e, index, True)
for e in self.data.getfind("Makings"):
index = set_params(e, index, False)
# Ability archetypes for simplified character generation in the debug base
for e in self.data.getfind("SampleTypes"):
index = set_params(e, index, True, sperb=1.0)
# Set the source types each archetype derives from
# Wise type <- Standard type, All-round type
e = self.data.find("Natures/Nature[8]/BaseNatures/BaseNature[1]")
e.text = self.data.find("Natures/Nature[1]/Name").text
e = self.data.find("Natures/Nature[8]/BaseNatures/BaseNature[2]")
e.text = self.data.find("Natures/Nature[2]/Name").text
# Peerless type <- Brave type, Hero type
e = self.data.find("Natures/Nature[9]/BaseNatures/BaseNature[1]")
e.text = self.data.find("Natures/Nature[3]/Name").text
e = self.data.find("Natures/Nature[9]/BaseNatures/BaseNature[2]")
e.text = self.data.find("Natures/Nature[4]/Name").text
# Genius type <- Tactician type, Strategist type
e = self.data.find("Natures/Nature[10]/BaseNatures/BaseNature[1]")
e.text = self.data.find("Natures/Nature[5]/Name").text
e = self.data.find("Natures/Nature[10]/BaseNatures/BaseNature[2]")
e.text = self.data.find("Natures/Nature[6]/Name").text
# Description texts
entrydlg = self.res.get_tpf0form("TENTRYDLG")
if entrydlg:
typesheet = entrydlg["EntryDlg"]["PageControl"]["TypeSheet"]
# Standard type
e = self.data.find("Natures/Nature[1]/Description")
e.text = typesheet["Type3Label"]["Caption"]
# All-round type
e = self.data.find("Natures/Nature[2]/Description")
e.text = typesheet["Type2Label"]["Caption"]
# Brave type
e = self.data.find("Natures/Nature[3]/Description")
e.text = typesheet["Type1Label"]["Caption"]
# Hero type
e = self.data.find("Natures/Nature[4]/Description")
e.text = typesheet["Type0Label"]["Caption"]
# Tactician type
e = self.data.find("Natures/Nature[5]/Description")
e.text = typesheet["Type4Label"]["Caption"]
# Strategist type
e = self.data.find("Natures/Nature[6]/Description")
e.text = typesheet["Type5Label"]["Caption"]
except Exception:
cw.util.print_ex()
def _get_sounds(self):
# Search for the sound file names using binary fragments as landmarks.
if not self.exe or not ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
return
try:
sounds = self.data.getfind("Sounds")
def get_keybefore(e, key, length, less=0):
index = self.exebinary.find(key)
if 0 <= index:
index -= less
e.text = unicode(self.exebinary[index-length:index], cw.MBCS)
def get_keyafter(e, key, length, than=0):
index = self.exebinary.find(key)
if 0 <= index:
index += len(key)
index += than
e.text = unicode(self.exebinary[index:index+length], cw.MBCS)
# System: error
# ".wav\0は、行動不能です。"
key = <KEY>"
get_keybefore(sounds[0], key, 16)
# System: click
get_keybefore(sounds[1], key, 18, less=16+5)
# System: signal
# ".wav\0本アプリケーションは『小さいフォント』に対応しています。"
key = ".wav\0\x96\x7B\x83\x41\x83\x76\x83\x8A\x83\x50\x81\x5B\x83\x56\x83\x87\x83\x93\x82\xCD\x81\x77\x8F\xAC\x82\xB3\x82\xA2\x83\x74\x83\x48\x83\x93\x83\x67\x81\x78\x82\xC9\x91\xCE\x89\x9E\x82\xB5\x82\xC4\x82\xA2\x82\xDC\x82\xB7\x81\x42"
get_keybefore(sounds[2], key, 18)
# System: initialize
get_keybefore(sounds[6], key, 16, less=18+8)
# System: evade
# "死者有効\0抵抗有効\0"
key = <KEY>"
get_keyafter(sounds[3], key, 14)
# System: ineffective
get_keyafter(sounds[11], key, 14, than=14+5)
# System: page turn
key = ".wav\0CHECK_FIXED\0CHECK_TARGET\0"
get_keybefore(sounds[4], key, 18)
# System: harvest
# "TMainWindow\0TBookDlg\0状態\0"
key = <KEY>"
get_keybefore(sounds[5], key, 14)
# System: battle
key = ".wav\0Encounter\0\x30\0\0Round\x20\0"
get_keybefore(sounds[7], key, 14)
# System: equip
# "\0_2\0_3\0_4\0_5\0_6\0異常発生\0"
key = <KEY>"
get_keyafter(sounds[8], key, 14)
# Effect (confusion)
get_keyafter(sounds[12], key, 12, than=41)
# Effect (binding)
key = <KEY>"
get_keyafter(sounds[13], key, 12, than=75)
# System: flee
key = ".wav\0TITLE_CARD1\0TITLE_CARD1\0TITLE_CARD2\0"
get_keybefore(sounds[9], key, 14, less=16+5)
# System: discard
# "\0を捨てます。よろしいですか?\0"
key = <KEY>"
get_keyafter(sounds[10], key, 14)
except Exception:
cw.util.print_ex()
def _get_partyinfo(self):
if not self.exe or not ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
return
try:
key = "\0IMAGE_COMMAND3\0"
index = self.exebinary.find(key)
if 0 <= index:
index += len(key) + 98
s = unicode(self.exebinary[index:index+12], cw.MBCS)
if s != "IMAGE_FATHER":
self.partyinfo_res = s
except Exception:
cw.util.print_ex()
def _get_cards(self):
if not self.exe or not ((1, 2, 8, 0) <= self.version and self.version <= (1, 3, 99, 99)):
return
try:
key = "\0CARD_SKILL\0CARD_ACTION\0IMAGE_ACTION\0"
index = self.exebinary.find(key)
if 0 <= index:
index += len(key)
# Action cards
# Strings are read in the order:
# name, description, sound 1, sound 2, default key codes
def get_actioncard(cardkey, index, keycodenum):
name, index = self._get_text(index, True)
desc, index = self._get_text(index, True)
sound1, index = self._get_text(index)
sound2, index = self._get_text(index)
keycodes = []
for _i in xrange(0, keycodenum):
keycode, index = self._get_text(index)
keycodes.append(keycode)
data = self.actioncard[cardkey]
data.find("Property/Name").text = name
data.find("Property/Description").text = desc
data.find("Property/SoundPath").text = sound1
data.find("Property/SoundPath2").text = sound2
data.find("Property/KeyCodes").text = cw.util.encodewrap("\n".join(keycodes))
return index
# Card exchange
index = get_actioncard("00_Exchange", index, 1)
# Attack
index = get_actioncard("01_Attack", index, 1)
# Powerful attack
index = get_actioncard("02_PowerfulAttack", index, 1)
# Critical attack
index = get_actioncard("03_CriticalAttack", index, 1)
# Feint
index = get_actioncard("04_Feint", index, 2)
# Defense
index = get_actioncard("05_Defense", index, 1)
# Distance
index = get_actioncard("06_Distance", index, 1)
# Confusion
index = get_actioncard("-1_Confuse", index, 1)
# Runaway
index = get_actioncard("07_Runaway", index, 1)
key = ".wav\0Encounter\0\x30\0\0Round\x20\0"
index = self.exebinary.find(key)
if 0 <= index:
# Menu cards of the special areas
# Strings are read in the order:
# name, description, image resource name
index += len(key)
def get_menucard(area, index):
name, index = self._get_text(index, True)
desc, index = self._get_text(index, True)
_image, index = self._get_text(index)
for data in area:
e = data[0].find("MenuCards/*[%s]" % (data[1]))
e.find("Property/Name").text = name
e.find("Property/Description").text = desc
return index
# Start
index = get_menucard([(self.title["01_Title"], 1)], index)
# Quit
index = get_menucard([(self.title["01_Title"], 2)], index)
# Open the guest book
index = get_menucard([(self.yado["01_Yado"], 1),
(self.yado["02_Yado2"], 1),
(self.yado["03_YadoInitial"], 1)], index)
# Resume the adventure
index = get_menucard([(self.yado["01_Yado"], 3),
(self.yado["02_Yado2"], 3)], index)
# View the notices
index =
import itertools
from typing import Any, Callable, Collection, Dict, List
import numpy as np
from active_reward_learning.envs import TabularMDP
from active_reward_learning.reward_models.basic_gp_reward_model import (
BasicGPRewardModel,
)
from active_reward_learning.reward_models.query import ComparisonQueryLinear
from active_reward_learning.solvers import BaseSolver
from active_reward_learning.util.helpers import (
get_dict_assert,
get_dict_default,
pdf_multivariate_gauss,
)
def get_policy_W(gp_reward_model: BasicGPRewardModel, policy_i: int):
"""
Get state visitation frequencies of a single policy.
"""
assert gp_reward_model.candidate_policies is not None
policy = gp_reward_model.candidate_policies[policy_i]
# we just need the covariance of rewards that are supported in W
if gp_reward_model.use_trajectories_to_evaluate_policy:
freq = gp_reward_model.state_visitation_frequencies[policy_i]
all_states = [
np.fromstring(k, dtype=gp_reward_model.state_repr_dtype)
for k in freq.keys()
]
W = np.array([freq[repr.tostring()] for repr in all_states])
else:
assert gp_reward_model.environment_is_tabular
W = gp_reward_model.env.get_return_trafo_for_policy(policy)
all_states = gp_reward_model.env.get_all_states_repr()
support = W != 0
states_support = [s for i, s in enumerate(all_states) if support[i]]
return W, support, states_support, all_states
def get_multiple_policies_W(
gp_reward_model: BasicGPRewardModel, candidate_policy_indices: Collection[int]
):
"""
Get state visitation frequencies of multiple policies.
"""
assert candidate_policy_indices is not None
assert len(candidate_policy_indices) >= 2
W_list = []
states_in_W_repr_list = []
all_states = set()
for policy_i in candidate_policy_indices:
W, support, states_support, states_in_W = get_policy_W(
gp_reward_model, policy_i
)
states_in_W_repr = [state_repr.tostring() for state_repr in states_in_W]
all_states.update(states_in_W_repr)
W_list.append(W)
states_in_W_repr_list.append(states_in_W_repr)
N_states = len(all_states)
all_states_idx = dict(zip(list(all_states), range(N_states)))
W_list_new = []
support_any = np.zeros(N_states, dtype=bool)
states_support = [None] * N_states
for W, states_in_W_repr in zip(W_list, states_in_W_repr_list):
W_new = np.zeros(N_states)
for W_val, state_repr in zip(W, states_in_W_repr):
if W_val > 0:
i = all_states_idx[state_repr]
W_new[i] = W_val
if not support_any[i]:
support_any[i] = True
states_support[i] = np.fromstring(
state_repr, dtype=gp_reward_model.state_repr_dtype
)
W_list_new.append(W_new)
states_support = [s for s in states_support if s is not None]
return W_list_new, support_any, states_support
class TwoStepGPRewardModel(BasicGPRewardModel):
"""
Implements a GP reward model with a two-step acquisition function.
Points to query are selected by:
1. selecting a policy to learn about
2. select a point that is informative about the policy
Steps 1 and 2 can be customized by specifying the `policy_selection_function`
and the `state_selection_function`.
"""
def __init__(
self,
env: TabularMDP,
kernel_function,
solver: BaseSolver,
policy_selection_function: Callable[
[List[np.ndarray], BasicGPRewardModel, Dict[str, Any]], List[int]
],
state_selection_function: Callable[
[np.ndarray, BasicGPRewardModel, Dict[str, Any]], int
],
obs_var: float = 0,
arguments: Dict[str, Any] = {},
**kwargs,
):
acquisition_function = self.get_acquisition_function(
policy_selection_function, state_selection_function, arguments
)
super().__init__(
env, acquisition_function, kernel_function, solver, obs_var, **kwargs
)
def get_acquisition_function(
self,
policy_selection_function: Callable[
[List[np.ndarray], BasicGPRewardModel, Dict[str, Any]], List[int]
],
state_selection_function: Callable[
[np.ndarray, BasicGPRewardModel, Dict[str, Any]], int
],
arguments: Dict[str, Any],
) -> Callable[[BasicGPRewardModel], int]:
"""
Return an acquisition function that first selects a policy / set of policies
according to the `policy_selection_function` and then a state to query according
to the `state_selection_function`. The acquisition function can then simply
be used with a `BasicGPRewardModel`.
"""
def acquisition_function(gp_reward_model: BasicGPRewardModel) -> int:
assert gp_reward_model.candidate_policies is not None
policies_idx = policy_selection_function(
gp_reward_model.candidate_policies, gp_reward_model, arguments
)
state = state_selection_function(policies_idx, gp_reward_model, arguments)
return state
return acquisition_function
def policy_selection_none(
candidate_policies: List[np.ndarray],
gp_reward_model: BasicGPRewardModel,
arguments: Dict[str, Any] = {},
) -> List[int]:
return list(range(len(candidate_policies)))
def policy_selection_maximum_regret(
candidate_policies: List[np.ndarray],
gp_reward_model: BasicGPRewardModel,
arguments: Dict[str, Any] = {},
) -> List[int]:
"""
Implementation of [1].
[1] Wilde, Nils, <NAME>, and <NAME>.
"Active preference learning using maximum regret."
https://arxiv.org/pdf/2005.04067.pdf
"""
assert gp_reward_model.candidate_rewards is not None
assert gp_reward_model.use_comparisons
if gp_reward_model.environment_is_tabular:
raise NotImplementedError(
"Maximum regret acquisition is not implemented for tabular environments"
)
simple_model = get_dict_default(arguments, "simple_model", False)
gp = gp_reward_model.gp_model
if simple_model:
uncertainty_p = get_dict_assert(arguments, "uncertainty_p")
assert 0.5 < uncertainty_p < 1, uncertainty_p
reward_probs = np.ones(len(gp_reward_model.candidate_rewards))
for x, y in zip(gp.X_list[1:], gp.Y_list[1:]): # first one is grounding
assert y == 1 or y == -1
for i, reward_w in enumerate(gp_reward_model.candidate_rewards):
features_1, features_2 = x
reward_1 = np.dot(features_1, reward_w)
reward_2 = np.dot(features_2, reward_w)
if (reward_1 > reward_2 and y == 1) or (
reward_1 <= reward_2 and y == -1
):
reward_probs[i] *= uncertainty_p
else:
reward_probs[i] *= 1 - uncertainty_p
reward_probs /= np.sum(reward_probs)
else:
mu = gp.linear_predictive_mean
cov = gp.linear_predictive_cov
reward_probs = []
for reward_w in gp_reward_model.candidate_rewards:
reward_prob = pdf_multivariate_gauss(reward_w, mu, cov)
reward_probs.append(reward_prob)
max_reg = -float("inf")
best_ij = [0, 0]
for query in gp_reward_model.candidate_queries:
assert isinstance(query, ComparisonQueryLinear)
i, j = query.info["policy_i1"], query.info["policy_i2"]
features_i, features_j = query.gp_repr_list
reward_i = gp_reward_model.candidate_rewards[i]
reward_j = gp_reward_model.candidate_rewards[j]
p_i = reward_probs[i]
p_j = reward_probs[j]
G_pi_i_w_i = np.dot(features_i, reward_i)
G_pi_j_w_j = np.dot(features_j, reward_j)
G_pi_i_w_j = np.dot(features_i, reward_j)
G_pi_j_w_i = np.dot(features_j, reward_i)
# old implementation (wrong)
# regret = - p_i * p_j * (G_pi_i_w_j / G_pi_j_w_j + G_pi_j_w_i / G_pi_i_w_i)
# ratio based regret
# regret = p_i * p_j * (2 - G_pi_i_w_j / G_pi_j_w_j - G_pi_j_w_i / G_pi_i_w_i)
# difference based
R1 = max(G_pi_j_w_j - G_pi_i_w_j, 0)
R2 = max(G_pi_i_w_i - G_pi_j_w_i, 0)
regret = p_i * p_j * (R1 + R2)
# prints for debugging
print(f"i: {i} j: {j}")
print(
f"\tG_pi_i_w_j: {G_pi_i_w_j:.2f} G_pi_j_w_j: {G_pi_j_w_j:.2f} "
f"G_pi_j_w_i: {G_pi_j_w_i:.2f} G_pi_i_w_i: {G_pi_i_w_i:.2f} "
)
print(f"\tR1: {R1:.2f} R2: {R2:.2f} p_i: {p_i} p_j: {p_j}")
print("\tregret", regret)
###
if regret > max_reg:
max_reg = regret
best_ij = [i, j]
print("max_reg", max_reg)
print("best_ij", best_ij)
return best_ij
def policy_selection_most_uncertain_pair_of_plausible_maximizers(
candidate_policies: List[np.ndarray],
gp_reward_model: BasicGPRewardModel,
arguments: Dict[str, Any] = {},
) -> List[int]:
"""
Selects two plausible maximizers that define the most uncertain direction.
First determines the set of plausible maximizer policies, by comparing their
confidence bounds. Then compares each pair of policies from the set of plausible
maximizers to find the pair that has the highest variance in the difference of their
expected returns.
"""
n_policies = get_dict_default(arguments, "n_policies", 2)
assert n_policies == 2
if gp_reward_model.use_trajectories_to_evaluate_policy is not None:
W_list, support_all, states_support = get_multiple_policies_W(
gp_reward_model, list(range(len(candidate_policies)))
)
mu_support, sigma_support = gp_reward_model.gp_model.predict_multiple(
states_support
)
else:
raise NotImplementedError()
# don't determine plausible maximizers
plausible_maximizers = policy_indices = np.arange(len(candidate_policies))
max_ij = [0, 1]
max_var = -float("inf")
for i, j in itertools.combinations(range(len(plausible_maximizers)), 2):
policy_i1, policy_i2 = policy_indices[i], policy_indices[j]
W_1, W_2 = W_list[policy_i1], W_list[policy_i2]
G_pi_diff_var = np.dot(
W_1[support_all] - W_2[support_all],
np.dot(sigma_support, W_1[support_all] - W_2[support_all]),
)
if G_pi_diff_var > max_var:
max_ij = [policy_i1, policy_i2]
max_var = G_pi_diff_var
# if tuple(max_ij) == (0, 1023):
# import ipdb; ipdb.set_trace()
print("max_ij", max_ij)
return max_ij
def state_selection_MI_diff(
policy_idx: List[int],
gp_reward_model: BasicGPRewardModel,
arguments: Dict[str, Any] = {},
) -> int:
"""
Select a state to query so as to maximize the mutual information between the
state's reward and the difference between the expected returns of the two
policies selected in the first step.
Note that maximizing I(G^\\pi, (s, r(s))) is equivalent to minimizing
H(G^\\pi | r(s)) (because H(G^\\pi, r(s)) is constant). Hence, maximizing
mutual information is approximated by 'hallucinating' reward observations
for each state and then finding the state that minimizes the conditional entropy.
"""
assert gp_reward_model.candidate_policies is not None
assert len(policy_idx) == 2
policy_i1 = policy_idx[0]
policy_i2 = policy_idx[1]
(W_1, W_2), support, states_support = get_multiple_policies_W(
gp_reward_model, (policy_i1, policy_i2)
)
min_var = float("inf")
min_var_states = [0]
(
candidate_queries_gp_repr,
candidate_queries_linear_combinations,
candidate_queries_gp_repr_idx,
) = gp_reward_model.get_candidate_queries_gp_repr()
for i in range(len(candidate_queries_gp_repr)):
gp_repr = candidate_queries_gp_repr[i]
linear_combination = candidate_queries_linear_combinations[i]
query = (gp_repr, linear_combination)
(
_,
sigma_support,
) = gp_reward_model.gp_model.make_temporary_observation_and_predict(
query, 0, states_support
)
var = np.dot(
W_1[support] - W_2[support],
np.dot(sigma_support, W_1[support] - W_2[support]),
)
idx = candidate_queries_gp_repr_idx[i]
if var < min_var:
min_var = var
min_var_states = [idx]
elif var == min_var:
min_var_states.append(idx)
return np.random.choice(min_var_states)
def state_selection_MI(
policy_idx: List[int],
gp_reward_model: BasicGPRewardModel,
arguments: Dict[str, Any] = {},
) -> int:
"""
Select a state to query so as to maximize the mutual information between the
state's reward and the expected return of the policy selected in the first step.
Note that maximizing I(G^\\pi, (s, r(s))) is equivalent to minimizing
H(G^\\pi | r(s)) (because H(G^\\pi, r(s)) is constant). Hence, maximizing
mutual information is approximated by 'hallucinating' reward observations
for each state and then finding the state that minimizes the conditional entropy.
"""
assert gp_reward_model.candidate_policies is not None
assert len(policy_idx) == 1
policy_i1 = policy_idx[0]
W_1, support, states_support, _ = get_policy_W(gp_reward_model, policy_i1)
min_H_cond = float("inf")
min_H_cond_states = [0]
(
candidate_queries_gp_repr,
candidate_queries_linear_combinations,
candidate_queries_gp_repr_idx,
) = gp_reward_model.get_candidate_queries_gp_repr()
print("len(candidate_queries_gp_repr)", len(candidate_queries_gp_repr))
for i in range(len(candidate_queries_gp_repr)):
gp_repr = candidate_queries_gp_repr[i]
linear_combination = candidate_queries_linear_combinations[i]
query = (gp_repr, linear_combination)
(
_,
sigma_support,
) = gp_reward_model.gp_model.make_temporary_observation_and_predict(
query, 0, states_support
)
var = np.dot(
W_1[support],
np.dot(sigma_support, W_1[support]),
)
# H_cond = 0.5 * np.log(2 * np.pi * np.e * var)
H_cond = var
# File: flexx/app/_asset.py
"""
Definition of the Asset class to represent JS and CSS assets, and a derived
class used as a container for one or more JSModule classes.
"""
import sys
import types
from urllib.request import urlopen, Request
from . import logger
# The pscript package does not deal with license headers,
# we add them to our assets here.
HEADER = 'Autogenerated code from Flexx. Code Subject to the BSD-2-clause license.'
HEADER = '/* %s */\n\n' % HEADER
url_starts = 'https://', 'http://'
# Although these two funcs are better off in modules.py, that causes circular refs.
def get_mod_name(ob):
""" Get the module name of an object (the name of a module object or
the name of the module in which the object is defined). Our naming
differs slightly from Python's in that the module in ``foo/bar/__init__.py``
would be named ``foo.bar.__init__``, which simplifies dependency handling
for Flexx. Note that such modules only occur if stuff is actually defined
in them.
"""
if not isinstance(ob, types.ModuleType):
ob = sys.modules[ob.__module__]
name = ob.__name__
if module_is_package(ob):
name += '.__init__'
return name
def module_is_package(module):
""" Get whether the given module represents a package.
"""
if hasattr(module, '__file__'):
if module.__file__.rsplit('.', 1)[0].endswith('__init__'):
return True
return False
def solve_dependencies(things, warn_missing=False):
""" Given a list of things, which each have a ``name`` and ``deps``
attribute, return a new list sorted to meet dependencies.
"""
assert isinstance(things, (tuple, list))
names = [thing.name for thing in things]
thingmap = dict([(n, t) for n, t in zip(names, things)])
for index in range(len(names)):
seen_names = set()
while True:
# Get thing name on this position, check if its new
name = names[index]
if name in seen_names:
raise RuntimeError('Detected circular dependency!')
seen_names.add(name)
# Move deps in front of us if necessary
for dep in thingmap[name].deps:
if dep not in names:
if warn_missing:
logger.warn('%r has missing dependency %r' % (name, dep))
else:
j = names.index(dep)
if j > index:
names.insert(index, names.pop(j))
break # do this index again; the dep we just moved
else:
break # no changes, move to next index
return [thingmap[name] for name in names]
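# Usage sketch (illustrative, not part of the original module): any objects
# exposing ``name`` and ``deps`` work, e.g.:
#
#     Thing = namedtuple('Thing', ('name', 'deps'))
#     things = [Thing('a', ['b']), Thing('b', [])]
#     [t.name for t in solve_dependencies(things)]  # -> ['b', 'a']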
# todo: We could do (basic) minification of the JS, but it would make the code
# less readable, so better do this once we have source maps.
class Asset:
""" Class to represent an asset (JS or CSS) to be included on the page.
Users will typically use ``app.assets.add_shared_asset()``, see the
corresponding docs for details.
"""
_counter = 0
def __init__(self, name, source=None):
Asset._counter += 1 # so we can sort assets by their instantiation order
self.i = Asset._counter
# Handle name
if not isinstance(name, str):
raise TypeError('Asset name must be str.')
if name.startswith(url_starts):
if source is not None:
raise TypeError('Remote assets cannot have a source: %s' % name)
source = name
name = name.replace('\\', '/').split('/')[-1]
if not name.lower().endswith(('.js', '.css')):
raise ValueError('Asset name must end in .js or .css.')
self._name = name
# Handle source
self._remote = False
self._source_str = None
self._source = source
if source is None:
raise TypeError('Asset needs a source.')
elif isinstance(source, str):
if source.startswith(url_starts):
self._remote = True
elif source.startswith('file://'):
raise TypeError('Cannot specify an asset using "file://", '
'use http or open the file and use contents.')
else:
self._source_str = source
elif callable(source):
pass
else:
raise TypeError('Asset source must be str or callable.')
def __repr__(self):
return '<%s %r at 0x%0x>' % (self.__class__.__name__, self._name, id(self))
@property
def name(self):
""" The (file) name of this asset.
"""
return self._name
@property
def source(self):
""" The source for this asset. Can be str, URL or callable.
"""
return self._source
@property
def remote(self):
""" Whether the asset is remote (client will load it from elsewhere).
If True, the source specifies the URL.
"""
return self._remote
def to_html(self, path='{}', link=3):
""" Get HTML element tag to include in the document.
Parameters:
path (str): the path of this asset, in which '{}' can be used as
a placeholder for the asset name.
link (int): whether to link to this asset:
* 0: the asset is embedded.
* 1: normal assets are embedded, remote assets remain remote.
* 2: the asset is linked (and served by our server).
* 3: (default) normal assets are linked, remote assets remain remote.
"""
path = path.replace('{}', self.name)
if self.name.lower().endswith('.js'):
if self.remote and link in (1, 3):
return "<script src='%s' id='%s'></script>" % (self.source, self.name)
elif link in (0, 1):
code = self.to_string()
s = '\n' if ('\n' in code) else ''
return "<script id='%s'>%s%s%s</script>" % (self.name, s, code, s)
else:
return "<script src='%s' id='%s'></script>" % (path, self.name)
elif self.name.lower().endswith('.css'):
if self.remote and link in (1, 3):
t = "<link rel='stylesheet' type='text/css' href='%s' id='%s' />"
return t % (self.source, self.name)
elif link in (0, 1):
code = self.to_string()
s = '\n' if ('\n' in code) else ''
return "<style id='%s'>%s%s%s</style>" % (self.name, s, code, s)
else:
t = "<link rel='stylesheet' type='text/css' href='%s' id='%s' />"
return t % (path, self.name)
else: # pragma: no cover
raise NameError('Assets must be .js or .css')
def to_string(self):
""" Get the string code for this asset. Even for remote assets.
"""
if self._source_str is None:
if callable(self._source):
self._source_str = self._source()
if not isinstance(self._source_str, str):
t = 'Source function of asset %r did not return a str, but a %s.'
raise ValueError(t % (self.name, self._source.__class__.__name__))
elif self._remote:
self._source_str = self._get_from_url(self._source)
else: # pragma: no cover
assert False, 'This should not happen'
return self._source_str
def _get_from_url(self, url):
if url.startswith(url_starts):
req = Request(url, headers={'User-Agent': 'flexx'})
return urlopen(req, timeout=5.0).read().decode()
else: # pragma: no cover
raise ValueError('_get_from_url() needs a URL string.')
class Bundle(Asset):
""" A bundle is an asset that represents a collection of Asset objects
and JSModule objects. In the output, the source for the modules occurs
after the sources of the assets. Dependency resolution is honoured for
the modules, and the bundle exposes an aggregate of the dependencies,
so that bundles can themselves be sorted.
"""
def __init__(self, name):
super().__init__(name, '')
self._assets = []
self._module_name = name.rsplit('.', 1)[0].split('-')[0]
self._modules = []
self._deps = set()
self._need_sort = False
def __repr__(self):
t = '<%s %r with %i assets and %i modules at 0x%0x>'
return t % (self.__class__.__name__, self._name,
len(self._assets), len(self._modules), id(self))
def add_asset(self, a):
""" Add an asset to the bundle. Assets added this way occur before the
code for the modules in this bundle.
"""
if not isinstance(a, Asset):
raise TypeError('Bundles.add_asset() needs an Asset, not %s.' %
a.__class__.__name__)
if isinstance(a, Bundle):
raise TypeError('Bundles can contain assets and modules, but not bundles.')
self._assets.append(a)
def add_module(self, m):
""" Add a module to the bundle. This will (lazily) invoke a
sort of the list of modules, and define dependencies to other
bundles, so that bundles themselves can be sorted.
"""
ext = '.' + self.name.rsplit('.')[-1].lower()
# Check if module belongs here
if not m.name.startswith(self._module_name):
raise ValueError('Module %s does not belong in bundle %s.' %
(m.name, self.name))
# Add module
self._modules.append(m)
self._need_sort = True
# Add deps for this module
deps = set()
for dep in m.deps:
while '.' in dep:
deps.add(dep)
dep = dep.rsplit('.', 1)[0]
deps.add(dep)
# Clear deps that are represented by this bundle
for dep in deps:
if not (dep.startswith(self._module_name) or
self._module_name.startswith(dep + '.')):
self._deps.add(dep + ext)
@property
def assets(self):
""" The list of assets in this bundle (excluding modules).
"""
return tuple(self._assets)
@property
def modules(self):
""" The list of modules, sorted by name and dependencies.
"""
if self._need_sort:
f = lambda m: m.name
self._modules = solve_dependencies(sorted(self._modules, key=f))
return tuple(self._modules)
@property
def deps(self):
""" The set of dependencies for this bundle, expressed in module names.
"""
return self._deps
def to_string(self):
# Concatenate code strings and add TOC. Module objects do/cache the work.
isjs = self.name.lower().endswith('.js')
toc = []
source = []
for a in self.assets:
toc.append('- asset ' + a.name)
source.append('/* ' + (' %s ' % a.name).center(70, '=') + '*/')
source.append(a.to_string())
for m in self.modules:
s = m.get_js() if isjs else m.get_css()
toc.append('- module ' + m.name)
source.append('/* ' + (' %s ' % m.name).center(70, '=') + '*/')
"1940:21"): "metadataonly",
("sou", "1940:9"): "metadataonly",
("sou", "1940:6"): "metadataonly",
("sou", "1939:51"): "metadataonly",
("sou", "1939:50"): "metadataonly",
("sou", "1939:38"): "metadataonly",
("sou", "1939:37"): "metadataonly",
("sou", "1939:35"): "metadataonly",
("sou", "1939:34"): "metadataonly",
("sou", "1939:26"): "metadataonly",
("sou", "1939:22"): "metadataonly",
("sou", "1939:11"): "metadataonly",
("sou", "1939:3"): "metadataonly",
("sou", "1939:2"): "metadataonly",
("sou", "1939:1"): "metadataonly",
("sou", "1938:58"): "metadataonly",
("sou", "1938:56"): "metadataonly",
("sou", "1938:55"): "metadataonly",
("sou", "1938:53"): "metadataonly",
("sou", "1938:52"): "metadataonly",
("sou", "1938:42"): "metadataonly",
("sou", "1938:34"): "metadataonly",
("sou", "1938:25"): "metadataonly",
("sou", "1938:19"): "metadataonly",
("sou", "1938:16"): "metadataonly",
("sou", "1938:15"): "metadataonly",
("sou", "1938:13"): "metadataonly",
("sou", "1938:7"): "metadataonly",
("sou", "1938:1"): "metadataonly",
("sou", "1937:55"): "metadataonly",
("sou", "1937:52"): "metadataonly",
("sou", "1937:51"): "metadataonly",
("sou", "1937:50"): "metadataonly",
("sou", "1937:44"): "metadataonly",
("sou", "1937:41"): "metadataonly",
("sou", "1937:39"): "metadataonly",
("sou", "1937:37"): "metadataonly",
("sou", "1937:36"): "metadataonly",
("sou", "1937:32"): "metadataonly",
("sou", "1937:31"): "metadataonly",
("sou", "1937:29"): "metadataonly",
("sou", "1937:26"): "metadataonly",
("sou", "1937:23"): "metadataonly",
("sou", "1937:22"): "metadataonly",
("sou", "1937:14"): "metadataonly",
("sou", "1937:10"): "metadataonly",
("sou", "1937:8"): "metadataonly",
("sou", "1937:7"): "metadataonly",
("sou", "1937:5"): "metadataonly",
("sou", "1937:1"): "metadataonly",
("sou", "1936:49"): "metadataonly",
("sou", "1936:45"): "metadataonly",
("sou", "1936:42"): "metadataonly",
("sou", "1936:41"): "metadataonly",
("sou", "1936:37"): "metadataonly",
("sou", "1936:36"): "metadataonly",
("sou", "1936:35"): "metadataonly",
("sou", "1936:28"): "metadataonly",
("sou", "1936:25"): "metadataonly",
("sou", "1936:15"): "metadataonly",
("sou", "1936:14"): "metadataonly",
("sou", "1936:10"): "metadataonly",
("sou", "1936:9"): "metadataonly",
("sou", "1936:8"): "metadataonly",
("sou", "1936:5"): "metadataonly",
("sou", "1936:4"): "metadataonly",
("sou", "1936:3"): "metadataonly",
("sou", "1936:1"): "metadataonly",
("sou", "1935:62"): "metadataonly",
("sou", "1935:51"): "metadataonly",
("sou", "1935:49"): "metadataonly",
("sou", "1935:48"): "metadataonly",
("sou", "1935:46"): "metadataonly",
("sou", "1935:45"): "metadataonly",
("sou", "1935:43"): "metadataonly",
("sou", "1935:35"): "metadataonly",
("sou", "1935:27"): "metadataonly",
("sou", "1935:26"): "metadataonly",
("sou", "1935:24"): "metadataonly",
("sou", "1935:13"): "metadataonly",
("sou", "1935:10"): "metadataonly",
("sou", "1935:9"): "metadataonly",
("sou", "1935:4"): "metadataonly",
("sou", "1935:1"): "metadataonly",
("sou", "1934:54"): "metadataonly",
("sou", "1934:53"): "metadataonly",
("sou", "1934:52"): "metadataonly",
("sou", "1934:48"): "metadataonly",
("sou", "1934:43"): "metadataonly",
("sou", "1934:33"): "metadataonly",
("sou", "1934:32"): "metadataonly",
("sou", "1934:31"): "metadataonly",
("sou", "1934:27"): "metadataonly",
("sou", "1934:25"): "metadataonly",
("sou", "1934:18"): "metadataonly",
("sou", "1934:14"): "metadataonly",
("sou", "1934:13"): "metadataonly",
("sou", "1934:9"): "metadataonly",
("sou", "1934:4"): "metadataonly",
("sou", "1933:38"): "metadataonly",
("sou", "1933:33"): "metadataonly",
("sou", "1933:30"): "metadataonly",
("sou", "1933:29"): "metadataonly",
("sou", "1933:23"): "metadataonly",
("sou", "1933:20"): "metadataonly",
("sou", "1933:16"): "metadataonly",
("sou", "1933:15"): "metadataonly",
("sou", "1933:14"): "metadataonly",
("sou", "1933:10"): "metadataonly",
("sou", "1933:7"): "metadataonly",
("sou", "1933:6"): "metadataonly",
("sou", "1933:5"): "metadataonly",
("sou", "1933:2"): "metadataonly",
("sou", "1932:39"): "metadataonly",
("sou", "1932:38"): "metadataonly",
("sou", "1932:37"): "metadataonly",
("sou", "1932:34"): "metadataonly",
("sou", "1932:32"): "metadataonly",
("sou", "1932:27"): "metadataonly",
("sou", "1932:19"): "metadataonly",
("sou", "1932:13"): "metadataonly",
("sou", "1932:12"): "metadataonly",
("sou", "1932:9"): "metadataonly",
("sou", "1931:38"): "metadataonly",
("sou", "1931:37"): "metadataonly",
("sou", "1931:19"): "metadataonly",
("sou", "1931:14"): "metadataonly",
("sou", "1931:12"): "metadataonly",
("sou", "1931:3"): "metadataonly",
("sou", "1931:1"): "metadataonly",
("sou", "1930:35"): "metadataonly",
("sou", "1930:32"): "metadataonly",
("sou", "1930:29"): "metadataonly",
("sou", "1930:27"): "metadataonly",
("sou", "1930:18"): "metadataonly",
("sou", "1930:15"): "metadataonly",
("sou", "1930:14"): "metadataonly",
("sou", "1930:13"): "metadataonly",
("sou", "1930:4"): "metadataonly",
("sou", "1930:2"): "metadataonly",
("sou", "1929:34"): "metadataonly",
("sou", "1929:31"): "metadataonly",
("sou", "1929:28"): "metadataonly",
("sou", "1929:26"): "metadataonly",
("sou", "1929:15"): "metadataonly",
("sou", "1929:10"): "metadataonly",
("sou", "1929:3"): "metadataonly",
("sou", "1928:26"): "metadataonly",
("sou", "1928:18"): "metadataonly",
("sou", "1928:12"): "metadataonly",
("sou", "1928:10"): "metadataonly",
("sou", "1928:2"): "metadataonly",
("sou", "1927:30"): "metadataonly",
("sou", "1927:27"): "metadataonly",
("sou", "1927:12"): "metadataonly",
("sou", "1927:10"): "metadataonly",
("sou", "1927:7"): "metadataonly",
("sou", "1927:1"): "metadataonly",
("sou", "1926:27"): "metadataonly",
("sou", "1926:12"): "metadataonly",
("sou", "1926:3"): "metadataonly",
("sou", "1925:35"): "metadataonly",
("sou", "1925:28"): "metadataonly",
("sou", "1925:27"): "metadataonly",
("sou", "1925:22"): "metadataonly",
("sou", "1925:17"): "metadataonly",
("sou", "1925:14"): "metadataonly",
("sou", "1925:10"): "metadataonly",
("sou", "1925:1"): "metadataonly",
("sou", "1924:38"): "metadataonly",
("sou", "1924:31"): "metadataonly",
("sou", "1924:30"): "metadataonly",
("sou", "1924:22"): "metadataonly",
("sou", "1924:19"): "metadataonly",
("sou", "1923:78"): "metadataonly",
("sou", "1923:67"): "metadataonly",
("sou", "1923:61"): "metadataonly",
("sou", "1923:56"): "metadataonly",
("sou", "1923:54"): "metadataonly",
("sou", "1923:37"): "metadataonly",
("sou", "1923:31"): "metadataonly",
("sou", "1923:24"): "metadataonly",
("sou", "1923:20"): "metadataonly",
("sou", "1923:19"): "metadataonly",
("sou", "1923:18"): "metadataonly",
("sou", "1923:17"): "metadataonly",
("sou", "1923:15"): "metadataonly",
("sou", "1923:7"): "metadataonly",
("sou", "1922:55"): "metadataonly",
("sou", "1922:54"): "metadataonly",
("sou", "1922:43"): "metadataonly",
("sou", "1922:37"): "metadataonly",
("sou", "1922:34"): "metadataonly",
("sou", "1922:27"): "metadataonly",
("sou", "1922:26"): "metadataonly",
("sou", "1922:13"): "metadataonly",
("sou", "1922:9"): "default", # Viktigt även för dagens PL?
("prop", "2002/03:58"): "metadataonly",
("prop", "2002/03:14"): "metadataonly",
("prop", "2001/02:116"): "metadataonly",
("prop", "2001/02:76"): "metadataonly",
("prop", "2000/01:141"): "metadataonly",
("prop", "2000/01:61"): "metadataonly",
("prop", "1998/99:141"): "metadataonly",
("prop", "2002/03:108"): "metadataonly",
("prop", "1999/2000:80"): "metadataonly",
("prop", "1998/99:54"): "metadataonly",
("prop", "1997/98:132"): "metadataonly",
("prop", "1996/97:58"): "metadataonly",
("prop", "1996/97:47"): "metadataonly",
("prop", "1995/96:112"): "metadataonly",
("prop", "1994/95:185"): "metadataonly",
("prop", "1994/95:79"): "metadataonly",
("prop", "1994/95:47"): "metadataonly",
("prop", "1994/95:37"): "metadataonly",
("prop", "1993/94:55"): "metadataonly",
("prop", "1993/94:18"): "metadataonly",
("prop", "1994/95:211"): "metadataonly",
("prop", "1993/94:254"): "metadataonly",
("prop", "1992/93:252"): "metadataonly",
("prop", "1992/93:247"): "metadataonly",
("prop", "1992/93:228"): "metadataonly",
("prop", "1992/93:221"): "metadataonly",
("prop", "1992/93:212"): "metadataonly",
("prop", "1991/92:171"): "metadataonly",
("prop", "1991/92:147"): "metadataonly",
("prop", "1991/92:144"): "metadataonly",
("prop", "1991/92:26"): "metadataonly",
("prop", "1991/92:12"): "metadataonly",
("prop", "1991/92:6"): "metadataonly",
("prop", "1990/91:105"): "metadataonly",
("prop", "1990/91:104"): "metadataonly",
("prop", "1990/91:57"): "metadataonly",
("prop", "1990/91:35"): "metadataonly",
("prop", "1990/91:22"): "metadataonly",
("prop", "1989/90:97"): "metadataonly",
("prop", "1989/90:93"): "metadataonly",
("prop", "1989/90:91"): "metadataonly",
("prop", "1989/90:16"): "metadataonly",
("prop", "1989/90:5"): "metadataonly",
("prop", "1988/89:146"): "metadataonly",
("prop", "1988/89:104"): "metadataonly",
("prop", "1988/89:87"): "metadataonly",
("prop", "1987/88:132"): "metadataonly",
("prop", "1987/88:56"): "metadataonly",
("prop", "1987/88:27"): "metadataonly",
("prop", "1987/88:19"): "metadataonly",
("prop", "1987/88:13"): "metadataonly",
("prop", "1986/87:67"): "metadataonly",
("prop", "1986/87:22"): "metadataonly",
("prop", "1986/87:10"): "metadataonly",
("prop", "1985/86:172"): "metadataonly",
("prop", "1985/86:168"): "metadataonly",
("prop", "1985/86:163"): "metadataonly",
("prop", "1985/86:152"): "metadataonly",
("prop", "1985/86:148"): "metadataonly",
("prop", "1985/86:144"): "metadataonly",
("prop", "1985/86:139"): "metadataonly",
("prop", "1985/86:137"): "metadataonly",
("prop", "1985/86:135"): "metadataonly",
("prop", "1985/86:113"): "metadataonly",
("prop", "1985/86:111"): "metadataonly",
("prop", "1985/86:108"): "metadataonly",
("prop", "1985/86:106"): "metadataonly",
("prop", "1985/86:97"): "metadataonly",
("prop", "1985/86:94"): "metadataonly",
("prop", "1985/86:91"): "metadataonly",
("prop", "1985/86:84"): "metadataonly",
("prop", "1985/86:82"): "metadataonly",
("prop", "1985/86:71"): "metadataonly",
("prop", "1985/86:69"): "metadataonly",
("prop", "1985/86:44"): "metadataonly",
("prop", "1985/86:37"): "metadataonly",
("prop", "1985/86:35"): "metadataonly",
("prop", "1985/86:24"): "metadataonly",
("prop", "1985/86:19"): "metadataonly",
("prop", "1985/86:18"): "metadataonly",
("prop", "1985/86:16"): "metadataonly",
("prop", "1985/86:6"): "metadataonly",
("prop", "1984/85:217"): "metadataonly",
("prop", "1984/85:206"): "metadataonly",
("prop", "1984/85:205"): "metadataonly",
("prop", "1984/85:204"): "metadataonly",
("prop", "1984/85:197"): "metadataonly",
("prop", "1984/85:192"): "metadataonly",
("prop", "1984/85:182"): "metadataonly",
("prop", "1984/85:174"): "metadataonly",
("prop", "1984/85:162"): "metadataonly",
("prop", "1984/85:154"): "metadataonly",
("prop", "1984/85:152"): "metadataonly",
("prop", "1984/85:134"): "metadataonly",
("prop", "1984/85:102"): "metadataonly",
("prop", "1984/85:95"): "metadataonly",
("prop", "1984/85:92"): "metadataonly",
("prop", "1984/85:84"): "metadataonly",
("prop", "1984/85:74"): "metadataonly",
("prop", "1984/85:73"): "metadataonly",
("prop", "1984/85:69"): "metadataonly",
("prop", "1984/85:66"): "metadataonly",
("prop", "1984/85:65"): "metadataonly",
("prop", "1984/85:58"): "metadataonly",
("prop", "1984/85:48"): "metadataonly",
("prop", "1984/85:34"): "metadataonly",
("prop", "1984/85:29"): "metadataonly",
("prop", "1984/85:24"): "metadataonly",
("prop", "1984/85:12"): "metadataonly",
("prop", "1983/84:134"): "metadataonly",
("prop", "1983/84:106"): "metadataonly",
("prop", "1983/84:98"): "metadataonly",
("prop", "1983/84:43"): "metadataonly",
("prop", "1983/84:34"): "metadataonly",
("prop", "1983/84:29"): "metadataonly",
("prop", "1983/84:14"): "metadataonly",
("prop", "1983/84:9"): "metadataonly",
("prop", "1983/84:5"): "metadataonly",
("prop", "1982/83:162"): "metadataonly",
("prop", "1982/83:69"): "metadataonly",
("prop", "1982/83:33"): "metadataonly",
("prop", "1982/83:4"): "metadataonly",
("prop", "1981/82:223"): "metadataonly",
("prop", "1981/82:209"): "metadataonly",
("prop", "1981/82:208"): "metadataonly",
("prop", "1981/82:202"): "metadataonly",
("prop", "1981/82:200"): "metadataonly",
("prop", "1981/82:184"): "metadataonly",
("prop", "1981/82:161"): "metadataonly",
("prop", "1981/82:140"): "metadataonly",
("prop", "1981/82:138"): "metadataonly",
("prop", "1981/82:132"): "metadataonly",
("prop", "1981/82:119"): "metadataonly",
("prop", "1981/82:110"): "metadataonly",
("prop", "1981/82:87"): "metadataonly",
("prop", "1981/82:84"): "metadataonly",
("prop", "1981/82:62"): "metadataonly",
("prop", "1981/82:61"): "metadataonly",
("prop", "1981/82:54"): "metadataonly",
("prop", "1981/82:47"): "metadataonly",
("prop", "1981/82:39"): "metadataonly",
("prop", "1981/82:38"): "metadataonly",
("prop", "1981/82:24"): "metadataonly",
("prop", "1981/82:18"): "metadataonly",
("prop", "1981/82:17"): "metadataonly",
("prop", "1981/82:6"): "metadataonly",
("prop", "1981/82:5"): "metadataonly",
("prop", "1980/81:164"): "metadataonly",
("prop", "1980/81:156"): "metadataonly",
("prop", "1980/81:140"): "metadataonly",
("prop", "1980/81:128"): "metadataonly",
("prop", "1980/81:121"): "metadataonly",
("prop", "1980/81:85"): "metadataonly",
("prop", "1980/81:83"): "metadataonly",
("prop", "1980/81:81"): "metadataonly",
("prop", "1980/81:72"): "metadataonly",
("prop", "1980/81:69"): "metadataonly",
("prop", "1980/81:40"): "metadataonly",
("prop", "1980/81:31"): "metadataonly",
("prop", "1980/81:30"): "metadataonly",
("prop", "1980/81:15"): "metadataonly",
("prop", "1980/81:14"): "metadataonly",
("prop", "1979/80:153"): "metadataonly",
("prop", "1979/80:140"): "metadataonly",
("prop", "1979/80:131"): "metadataonly",
("prop", "1979/80:116"): "metadataonly",
("prop", "1979/80:79"): "metadataonly",
("prop", "1979/80:70"): "metadataonly",
("prop", "1979/80:47"): "metadataonly",
("prop", "1979/80:45"): "metadataonly",
("prop", "1979/80:37"): "metadataonly",
("prop", "1979/80:3"): "metadataonly",
("prop", "1978/79:216"): "metadataonly",
("prop", "1978/79:159"): "metadataonly",
("prop", "1978/79:155"): "metadataonly",
("prop", "1978/79:131"): "metadataonly",
("prop", "1978/79:78"): "metadataonly",
("prop", "1977/78:180"): "metadataonly",
("prop", "1977/78:173"): "metadataonly",
("prop", "1977/78:123"): "metadataonly",
("prop", "1977/78:118"): "metadataonly",
("prop", "1977/78:103"): "metadataonly",
("prop", "1977/78:95"): "metadataonly",
("prop", "1977/78:29"): "metadataonly",
("prop", "1977/78:26"): "metadataonly",
("prop", "1977/78:21"): "metadataonly",
("prop", "1977/78:18"): "metadataonly",
("prop", "1977/78:3"): "metadataonly",
("prop", "1976/77:154"): "metadataonly",
("prop", "1976/77:145"): "metadataonly",
("prop", "1976/77:65"): "metadataonly",
("prop", "1976/77:37"): "metadataonly",
("prop", "1975/76:212"): "metadataonly",
("prop", "1975/76:203"): "metadataonly",
("prop", "1975/76:172"): "metadataonly",
("prop", "1975/76:143"): "metadataonly",
("prop", "1975/76:85"): "metadataonly",
("prop", "1975/76:27"): "metadataonly",
("prop", "1975:90"): "metadataonly",
("prop", "1975:83"): "metadataonly",
("prop", "1975:79"): "metadataonly",
("prop", "1975:74"): "metadataonly",
("prop", "1975:61"): "metadataonly",
("prop", "1975:51"): "metadataonly",
("prop", "1975:47"): "metadataonly",
("prop", "1975:44"): "metadataonly",
("prop", "1975:41"): "metadataonly",
("prop", "1975:39"): "metadataonly",
("prop", "1975:7"): "metadataonly",
("prop", "1974:179"): "metadataonly",
("prop", "1974:153"): "metadataonly",
("prop", "1974:134"): "metadataonly",
("prop", "1974:133"): "metadataonly",
("prop", "1974:125"): "metadataonly",
("prop", "1974:117"): "metadataonly",
("prop", "1974:112"): "metadataonly",
("prop", "1974:99"): "metadataonly",
("prop", "1974:93"): "metadataonly",
("prop", "1974:92"): "metadataonly",
("prop", "1974:90"): "metadataonly",
("prop", "1974:86"): "metadataonly",
("prop", "1974:76"): "metadataonly",
("prop", "1974:75"): "metadataonly",
("prop", "1974:71"): "metadataonly",
("prop", "1974:62"): "metadataonly",
("prop", "1974:60"): "metadataonly",
("prop", "1974:52"): "metadataonly",
("prop", "1974:40"): | |
"""Cloud storage abstract System"""
from abc import abstractmethod, ABC
from collections import OrderedDict, namedtuple
from re import compile
from stat import S_IFDIR, S_IFREG, S_IFLNK
from posixpath import join, normpath, dirname
from dateutil.parser import parse
from airfs._core.io_base import WorkerPoolBase
from airfs._core.compat import Pattern, getgid, getuid
from airfs._core.exceptions import (
ObjectNotFoundError,
ObjectPermissionError,
ObjectNotImplementedError,
ObjectUnsupportedOperation,
)
from airfs._core.functions_core import SeatsCounter
class SystemBase(ABC, WorkerPoolBase):
"""
Cloud storage system handler.
Subclasses of this class are not intended to be public and are implementation details.
This base system is for object storage that does not handle files with a true
hierarchy like file systems; directories are virtual with this kind of storage.
Args:
storage_parameters (dict): Storage configuration parameters.
Generally, client configuration and credentials.
unsecure (bool): If True, disables TLS/SSL to improve transfer performance,
but makes the connection insecure.
roots (tuple): Tuple of roots to force use.
"""
__slots__ = (
"_storage_parameters",
"_unsecure",
"_storage",
"_client",
"_cache",
"_roots",
)
#: If True, storage support symlinks
SUPPORTS_SYMLINKS = False
# By default, assumes that the information is in standard HTTP headers
_SIZE_KEYS = ("Content-Length",)
_CTIME_KEYS = ()
_MTIME_KEYS = ("Last-Modified",)
_CHAR_FILTER = compile(r"[^a-z0-9_]*")
def __init__(self, storage_parameters=None, unsecure=False, roots=None, **_):
WorkerPoolBase.__init__(self)
if storage_parameters:
storage_parameters = storage_parameters.copy()
for key in tuple(storage_parameters):
if key.startswith("airfs."):
del storage_parameters[key]
else:
storage_parameters = dict()
self._storage_parameters = storage_parameters
self._unsecure = unsecure
self._storage = self.__module__.rsplit(".", 1)[1]
self._client = None
self._cache = {}
if roots:
self._roots = roots
else:
self._roots = self._get_roots()
@property
def storage(self):
"""
Storage name
Returns:
str: Storage
"""
return self._storage
@property
def client(self):
"""
Storage client
Returns:
client
"""
if self._client is None:
self._client = self._get_client()
return self._client
def copy(self, src, dst, other_system=None):
"""
Copy object of the same storage.
Args:
src (str): Path or URL.
dst (str): Path or URL.
other_system (airfs._core.io_system.SystemBase subclass):
Other storage system. May be required for some storage.
"""
# This method is intended to copy objects to and from the same storage.
# It is possible to define methods to copy from a different storage by creating
# a "copy_from_<src_storage>" method for the target storage and, vice versa, to
# copy to a different storage by creating a "copy_to_<dst_storage>" method.
# These methods must have the same signature as "copy".
# "other_system" is optional and will be:
# - The destination storage system with "copy_to_<src_storage>" method.
# - The source storage system with "copy_from_<src_storage>" method.
# - None elsewhere.
# Note that if no "copy_from"/"copy_to" methods are defined, copies are performed
# over the current machine with "shutil.copyfileobj".
raise ObjectUnsupportedOperation
def exists(
self,
path=None,
client_kwargs=None,
assume_exists=None,
header=None,
follow_symlinks=None,
):
"""
Return True if path refers to an existing path.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
assume_exists (bool or None): Defines the value to return when there is
not enough permission to determine if the file exists. If None, the
permission exception is re-raised (default behavior). If True or False,
this value is returned instead.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
bool: True if exists.
"""
try:
path, client_kwargs, header = self.resolve(
path, client_kwargs, header, follow_symlinks
)
self.head(path, client_kwargs, header)
except ObjectNotFoundError:
return False
except ObjectPermissionError:
if assume_exists is None:
raise
return assume_exists
return True
@abstractmethod
def _get_client(self):
"""
Storage client
Returns:
client
"""
@abstractmethod
def get_client_kwargs(self, path):
"""
Get base keyword arguments for client for a specific path.
Args:
path (str): Absolute path or URL.
Returns:
dict: client args
"""
def getctime(self, path=None, client_kwargs=None, header=None):
"""
Return the creation time of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch (see the time module).
"""
return self._getctime_from_header(self.head(path, client_kwargs, header))
def _getctime_from_header(self, header):
"""
Return the time from header
Args:
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
"""
return self._get_time(header, self._CTIME_KEYS, "getctime")
def getmtime(self, path=None, client_kwargs=None, header=None):
"""
Return the time of last modification of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch (see the time module).
"""
return self._getmtime_from_header(self.head(path, client_kwargs, header))
def _getmtime_from_header(self, header):
"""
Return the time from header
Args:
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
"""
return self._get_time(header, self._MTIME_KEYS, "getmtime")
@staticmethod
def _get_time(header, keys, name):
"""
Get time from header
Args:
header (dict): Object header.
keys (tuple of str): Header keys.
name (str): Method name.
Returns:
float: The number of seconds since the epoch
"""
for key in keys:
try:
date_value = header[key]
except KeyError:
continue
try:
return parse(date_value).timestamp()
except TypeError:
return float(date_value)
raise ObjectUnsupportedOperation(name)
@abstractmethod
def _get_roots(self):
"""
Return URL roots for this storage.
Returns:
tuple of str or re.Pattern: URL roots
"""
def getsize(self, path=None, client_kwargs=None, header=None):
"""
Return the size, in bytes, of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
int: Size in bytes.
"""
return self._getsize_from_header(self.head(path, client_kwargs, header))
def _getsize_from_header(self, header):
"""
Return the size from header
Args:
header (dict): Object header.
Returns:
int: Size in bytes.
"""
for key in self._SIZE_KEYS:
try:
return int(header[key])
except KeyError:
continue
else:
raise ObjectUnsupportedOperation("getsize")
def isdir(
self,
path=None,
client_kwargs=None,
virtual_dir=True,
assume_exists=None,
header=None,
follow_symlinks=None,
):
"""
Return True if path is an existing directory.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
virtual_dir (bool): If True, also checks if the directory exists
virtually when no specific object exists at the path.
assume_exists (bool or None): Defines the value to return when there is
not enough permission to determine if the file exists. If None, the
permission exception is re-raised (default behavior). If True or False,
this value is returned instead.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
bool: True if directory exists.
"""
relative = self.relpath(path)
if not relative:
# Root always exists and is a directory
return True
if path[-1] == "/" or self.is_locator(relative, relative=True):
exists = self.exists(
path, client_kwargs, assume_exists, header, follow_symlinks
)
if exists:
return True
elif virtual_dir:
try:
next(self.list_objects(relative, relative=True, max_results=1))
return True
except (StopIteration, ObjectNotFoundError, ObjectUnsupportedOperation):
return False
return False
def isfile(
self,
path=None,
client_kwargs=None,
assume_exists=None,
header=None,
follow_symlinks=None,
):
"""
Return True if path is an existing regular file.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
assume_exists (bool or None): Defines the value to return when there is
not enough permission to determine if the file exists. If None, the
permission exception is re-raised (default behavior). If True or False,
this value is returned instead.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
bool: True if file exists.
"""
relative = self.relpath(path)
if not relative:
# Root always exists and is a directory
return False
if path[-1] != "/" and not self.is_locator(path, relative=True):
return self.exists(
path, client_kwargs, assume_exists, header, follow_symlinks
)
return False
@property
def storage_parameters(self):
"""
Storage parameters
Returns:
dict: Storage parameters
"""
return self._storage_parameters
@abstractmethod
def _head(self, client_kwargs):
"""
Returns object HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header.
"""
def head(self, path=None, client_kwargs=None, header=None):
"""
Returns object HTTP header.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
dict: HTTP header.
"""
if header is not None:
return header
elif client_kwargs is None:
client_kwargs = self.get_client_kwargs(path)
return self._head(client_kwargs)
@property
def roots(self):
"""
Return URL roots for this storage.
Returns:
tuple of str: URL roots
"""
return self._roots
@roots.setter
def roots(self, roots):
"""
Set URL roots for this storage.
Args:
roots (tuple of str): URL roots
"""
self._roots = roots
def relpath(self, path):
"""
Get path relative to storage.
Args:
path (str): Absolute path or URL.
Returns:
str: Relative path.
"""
for root in self.roots:
if isinstance(root, Pattern):
match = root.match(path)
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'div'
# generic language/style container
# =================== Paragraphs =======================================
class P(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'p'
# =================== Headings =========================================
#
# There are six levels of headings from h1 (the most important)
# to h6 (the least important).
#
class H1(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'h1'
class H2(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'h2'
class H3(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'h3'
class H4(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'h4'
class H5(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'h5'
class H6(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'align': attribAlign_492202555580820100,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'align': attribAlign_492202555580820100,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'h6'
# =================== Lists ============================================
# Unordered list bullet styles
# Unordered list
class Ul(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'compact': attribCompact_1275915173483479104,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'type': attribType_777549456165371904,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'compact': attribCompact_1275915173483479104,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'type': attribType_777549456165371904,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'ul'
# Ordered list numbering style
#
# 1 arabic numbers 1, 2, 3, ...
# a lower alpha a, b, c, ...
# A upper alpha A, B, C, ...
# i lower roman i, ii, iii, ...
# I upper roman I, II, III, ...
#
# The style is applied to the sequence number which by default
# is reset to 1 for the first list item in an ordered list.
#
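# Illustrative (assuming the usual POM keyword construction through
# KWATTRIBUTES): Ol(type=u'a', start=3) would produce an ordered list
# whose items are numbered c, d, e, ...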
# Ordered (numbered) list
class Ol(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'compact': attribCompact_1275915173483479104,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'start': attribStart_1707688972880919081,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'type': attribType_2839642281990897124,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'compact': attribCompact_1275915173483479104,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'start': attribStart_1707688972880919081,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'type': attribType_2839642281990897124,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'ol'
# single column list (DEPRECATED)
class Menu(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'compact': attribCompact_1275915173483479104,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
#!/usr/bin/env python
import sys
import os
import pickle
import pytest
import jip
from jip.pipelines import Pipeline
from jip.tools import Tool, tool, pipeline
from jip.options import Option
tool_1_def = """\
Usage: tools [-i <input>] [-o <output>] [-x <other>]
Options:
-i, --input <input> The input
[Default: stdin]
-o, --output <output> The output
[Default: stdout]
-x Other option
"""
@tool()
def nop():
return ""
def test_graph_create():
p = Pipeline()
a = p.run('nop')
b = p.run('nop')
p.run('nop')
assert len(p._nodes) == 3
assert p.add_edge(a, b) is not None
assert len(p._edges) == 1
def test_missing_node_for_edge_insert():
p = Pipeline()
assert p.add_edge("A", "B") is None
def test_topological_sort():
p = Pipeline()
a = p.run('nop')
assert a.name == "nop"
b = p.run('nop')
assert a.name == "nop.0"
assert b.name == "nop.1"
c = p.run('nop')
assert a.name == "nop.0"
assert b.name == "nop.1"
assert c.name == "nop.2"
p.add_edge(c, b)
p.add_edge(b, a)
sorted_nodes = [n for n in p.topological_order()]
assert sorted_nodes == [c, b, a]
def test_remove_node():
p = Pipeline()
a = p.run('nop')
b = p.run('nop')
c = p.run('nop')
p.add_edge(c, b)
p.add_edge(b, a)
p.remove(b)
assert len(p._nodes) == 2
assert len(p._edges) == 0
for node in p.nodes():
assert len(node._edges) == 0
def test_edge_equality():
p = Pipeline()
a = p.run('nop')
b = p.run('nop')
assert p.add_edge(a, b) is not None
assert p.add_edge(a, b) is not None
assert len(p._edges) == 1
def test_node_equality():
p = Pipeline()
tool = Tool(tool_1_def)
p.add(tool)
p.add(tool)
assert len(p._nodes) == 1
def test_get_node_properties():
tool = Tool(tool_1_def)
p = Pipeline()
node = p.add(tool)
assert isinstance(node.input, Option)
with pytest.raises(AttributeError) as ex:
node.xxx
assert str(ex.value) == "Option 'xxx' not found in tools"
def test_set_node_properties():
tool = Tool(tool_1_def)
p = Pipeline()
node = p.add(tool)
opt = node.input
assert isinstance(opt, Option)
node.input = "test.txt"
assert opt.raw() == "test.txt"
with pytest.raises(AttributeError) as ex:
node.xxx = "A"
assert str(ex.value) == "Option 'xxx' not found in tools"
def test_delegate_singleton_option():
tool_1 = Tool(tool_1_def)
tool_2 = Tool(tool_1_def)
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_2.input = node_1.output
assert len(p._nodes) == 2
assert len(p._edges) == 1
edge = p.get_edge(node_1, node_2)
assert edge is not None
assert len(edge._links) == 1
def test_delegate_singleton_node_default_option():
tool_1 = Tool(tool_1_def)
tool_2 = Tool(tool_1_def)
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_2.input = node_1
assert len(p._nodes) == 2
assert len(p._edges) == 1
edge = p.get_edge(node_1, node_2)
assert edge is not None
assert len(edge._links) == 1
def test_delegate_list_option():
tool_1 = Tool(tool_1_def)
tool_2 = Tool(tool_1_def)
tool_3 = Tool(tool_1_def)
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
node_3.input = [node_1.output, node_2.output]
assert len(node_3.input.value) == 2
assert len(p._edges) == 2
edge = p.get_edge(node_1, node_3)
assert edge is not None
assert len(edge._links) == 1
edge_2 = p.get_edge(node_2, node_3)
assert edge_2 is not None
assert len(edge_2._links) == 1
def test_delegate_list_node_default_option():
tool_1 = Tool(tool_1_def)
tool_2 = Tool(tool_1_def)
tool_3 = Tool(tool_1_def)
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
node_3.input = [node_1, node_2]
assert len(p._edges) == 2
edge = p.get_edge(node_1, node_3)
assert edge is not None
assert len(edge._links) == 1
edge_2 = p.get_edge(node_2, node_3)
assert edge_2 is not None
assert len(edge_2._links) == 1
def test_find_fanout_options():
tool = Tool(tool_1_def)
p = Pipeline()
node = p.add(tool)
node.input = ["test_1.txt", "test_2.txt"]
assert len(node.input.value) == 2
assert len(node.input) == 2
assert p._get_fanout_options(node) == [node.input]
def test_expand_single_node():
tool = Tool(tool_1_def)
p = Pipeline()
node = p.add(tool)
node.input = ["test_1.txt", "test_2.txt"]
p.expand(validate=False)
assert len(p._nodes) == 2
assert len(p._edges) == 0
inputs = []
for node in p.nodes():
inputs.append(node.input.get())
assert sorted(inputs) == [os.path.join(os.getcwd(), "test_1.txt"),
os.path.join(os.getcwd(), "test_2.txt")]
def test_expand_two_nodes_both_fan_out():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
p = Pipeline()
node_1 = p.add(tool_1)
node_1.input = ["test_1.txt", "test_2.txt"]
node_2 = p.add(tool_2)
node_2.input = node_1.output
assert len(p._nodes) == 2
assert len(p._edges) == 1
p.expand(validate=False)
assert len(p._nodes) == 4
assert len(p._edges) == 2
def test_expand_three_nodes_two_fan_out():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_1.input = ["test_1.txt", "test_2.txt"]
node_2 = p.add(tool_2)
node_2.input = node_1.output
node_3 = p.add(tool_3)
node_3.x = "other"
node_3 = p.add(tool_3)
node_2.x = node_3.x
assert len(p._nodes) == 3
assert len(p._edges) == 2
p.expand(validate=False)
assert len(p._nodes) == 5
assert len(p._edges) == 6
# test operators
def test_gt_to_file_name():
tool_1 = Tool(tool_1_def, "T1")
p = Pipeline()
node_1 = p.add(tool_1)
assert node_1._tool.options['output'] == sys.stdout
node_1 > "A.txt"
assert node_1._tool.options['output'] == "A.txt"
# test operators
def test_lt_from_file_name():
tool_1 = Tool(tool_1_def, "T1")
p = Pipeline()
node_1 = p.add(tool_1)
assert node_1.input == sys.stdin
node_1 < "A.txt"
assert node_1.input == "A.txt"
# test operators
def test_gt_to_node():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1._tool.options['output'] == sys.stdout
assert node_2._tool.options['input'] == sys.stdin
assert node_3._tool.options['input'] == sys.stdin
(node_1 > node_2) > node_3
n_1_out = node_1._tool.options['output'].raw()
n_2_in = node_2._tool.options['input'].raw()
n_2_out = node_2._tool.options['output'].raw()
n_3_in = node_3._tool.options['input'].raw()
assert n_1_out == n_2_in
assert n_2_out == n_3_in
assert len(list(node_1.outgoing())) == 1
assert len(list(node_2.outgoing())) == 1
assert len(list(node_3.outgoing())) == 0
# test operators
def test_lt_from_node():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1.output == sys.stdout
assert node_2.input == sys.stdin
assert node_3.input == sys.stdin
(node_1 < node_2) < node_3
assert not node_3.has_incoming()
assert node_2.has_incoming(node_3, ('output', 'input'), True)
assert node_1.has_incoming(node_2, ('output', 'input'), True)
# test operators
def test_gt_to_node_no_block():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1._tool.options['output'] == sys.stdout
assert node_2._tool.options['input'] == sys.stdin
assert node_3._tool.options['input'] == sys.stdin
node_1 > node_2 > node_3
n_1_out = node_1._tool.options['output'].raw()
n_2_in = node_2._tool.options['input'].raw()
n_2_out = node_2._tool.options['output'].raw()
n_3_in = node_3._tool.options['input'].raw()
assert n_1_out == n_2_in
assert n_2_out == n_3_in
assert len(list(node_1.outgoing())) == 1
assert len(list(node_2.outgoing())) == 1
assert len(list(node_3.outgoing())) == 0
# test operators
def test_lt_from_node_no_block():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1.output == sys.stdout
assert node_2.input == sys.stdin
assert node_3.input == sys.stdin
node_1 < node_2 < node_3
assert not node_3.has_incoming()
assert node_2.has_incoming(node_3, ('output', 'input'), True)
assert node_1.has_incoming(node_2, ('output', 'input'), True)
# test operators
def test_gt_to_option():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1._tool.options['output'] == sys.stdout
assert node_2._tool.options['input'] == sys.stdin
assert node_3._tool.options['input'] == sys.stdin
(node_1 > node_2.input) > node_3.input
n_1_out = node_1._tool.options['output'].raw()
n_2_in = node_2._tool.options['input'].raw()
n_2_out = node_2._tool.options['output'].raw()
n_3_in = node_3._tool.options['input'].raw()
assert n_1_out == n_2_in
assert n_2_out == n_3_in
assert len(list(node_1.outgoing())) == 1
assert len(list(node_2.incoming())) == 1
assert len(list(node_2.outgoing())) == 1
assert len(list(node_3.outgoing())) == 0
# test operators
def test_lt_from_option():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1.output == sys.stdout
assert node_2.input == sys.stdin
assert node_3.input == sys.stdin
(node_1 < node_2.output) < node_3.output
assert not node_3.has_incoming()
assert node_2.has_incoming(node_3, ('output', 'input'), True)
assert node_1.has_incoming(node_2, ('output', 'input'), True)
# test operators
def test_gt_to_option_no_blocks():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1.output == sys.stdout
assert node_2.input == sys.stdin
assert node_3.input == sys.stdin
node_1 > node_2.input # this does not work in a single line!
node_2 > node_3.input
assert not node_3.input.raw() == sys.stdin
# check the graph structure
assert node_2.has_incoming(node_1, ('output', 'input'),
True, node_1.output)
assert node_3.has_incoming(node_2, ('output', 'input'),
True, node_2.output)
assert not node_3.has_outgoing()
# test operators
def test_lt_from_option_no_block():
tool_1 = Tool(tool_1_def, "T1")
tool_2 = Tool(tool_1_def, "T2")
tool_3 = Tool(tool_1_def, "T3")
p = Pipeline()
node_1 = p.add(tool_1)
node_2 = p.add(tool_2)
node_3 = p.add(tool_3)
assert len(list(node_1.outgoing())) == 0
assert len(list(node_2.outgoing())) == 0
assert len(list(node_3.outgoing())) == 0
assert node_1.output == sys.stdout
assert node_2.input == sys.stdin
assert node_3.input == sys.stdin
node_1 < node_2.output
node_2 < node_3.output
assert not node_3.has_incoming()
assert node_2.has_incoming(node_3, ('output', 'input'), True)
assert node_1.has_incoming(node_2, ('output', 'input'), True)
CONTENT = """huc basin
<!--10n 60s-->
01 New England
0101 St. John
010100 St. John
01010001 Upper St. John
01010002 Allagash
01010003 Fish
01010004 Aroostook
01010005 Meduxnekeag
0102 Penobscot
010200 Penobscot
01020001 West Branch Penobscot
01020002 East Branch Penobscot
01020003 Mattawamkeag
01020004 Piscataquis
01020005 Lower Penobscot
0103 Kennebec
010300 Kennebec
01030001 Upper Kennebec
01030002 Dead
01030003 Lower Kennebec
0104 Androscoggin
010400 Androscoggin
01040001 Upper Androscoggin
01040002 Lower Androscoggin
0105 Maine Coastal
010500 Maine Coastal
01050001 St. Croix
01050002 Maine Coastal
01050003 St. George-Sheepscot
0106 Saco
010600 Saco
01060001 Presumpscot
01060002 Saco
01060003 Piscataqua-Salmon Falls
0107 Merrimack
010700 Merrimack
01070001 Pemigewasset
01070002 Merrimack
01070003 Contoocook
01070004 Nashua
01070005 Concord
0108 Connecticut
010801 Upper Connecticut
01080101 Upper Connecticut
01080102 Passumpsic
01080103 Waits
01080104 Upper Connecticut-Mascoma
01080105 White
01080106 Black-Ottauquechee
01080107 West
010802 Lower Connecticut
01080201 Middle Connecticut
01080202 Miller
01080203 Deerfield
01080204 Chicopee
01080205 Lower Connecticut
01080206 Westfield
01080207 Farmington
0109 Massachusetts-Rhode Island Coastal
010900 Massachusetts-Rhode Island Coastal
01090001 Charles
01090002 Cape Cod
01090003 Blackstone
01090004 Narragansett
01090005 Pawcatuck-Wood
0110 Connecticut Coastal
011000 Connecticut Coastal
01100001 Quinebaug
01100002 Shetucket
01100003 Thames
01100004 Quinnipiac
01100005 Housatonic
01100006 Saugatuck
01100007 Long Island Sound
0111 St. Francois
011100 St. Francois
01110000 St. Francois
02 Mid Atlantic
0201 Richelieu
020100 Richelieu
02010001 Lake George
02010002 Otter
02010003 Winooski
02010004 Ausable
02010005 Lamoille
02010006 Great Chazy-Saranac
02010007 Missisquoi
0202 Upper Hudson
020200 Upper Hudson
02020001 Upper Hudson
02020002 Sacandaga
02020003 Hudson-Hoosic
02020004 Mohawk
02020005 Schoharie
02020006 Middle Hudson
02020007 Rondout
02020008 Hudson-Wappinger
0203 Lower Hudson-Long Island
020301 Lower Hudson
02030101 Lower Hudson
02030102 Bronx
02030103 Hackensack-Passaic
02030104 Sandy Hook-Staten Island
02030105 Raritan
020302 Long Island
02030201 Northern Long Island
02030202 Southern Long Island
0204 Delaware
020401 Upper Delaware
02040101 Upper Delaware
02040102 East Branch Delaware
02040103 Lackawaxen
02040104 Middle Delaware-Mongaup-Brodhead
02040105 Middle Delaware-Musconetcong
02040106 Lehigh
020402 Lower Delaware
02040201 Crosswicks-Neshaminy
02040202 Lower Delaware
02040203 Schuylkill
02040204 Delaware Bay
02040205 Brandywine-Christina
02040206 Cohansey-Maurice
02040207 Broadkill-Smyrna
020403 New Jersey Coastal
02040301 Mullica-Toms
02040302 Great Egg Harbor
0205 Susquehanna
020501 Upper Susquehanna
02050101 Upper Susquehanna
02050102 Chenango
02050103 Owego-Wappasening
02050104 Tioga
02050105 Chemung
02050106 Upper Susquehanna-Tunkhannock
02050107 Upper Susquehanna-Lackawanna
020502 West Branch Susquehanna
02050201 Upper West Branch Susquehanna
02050202 Sinnemahoning
02050203 Middle West Branch Susquehanna
02050204 Bald Eagle
02050205 Pine
02050206 Lower West Branch Susquehanna
020503 Lower Susquehanna
02050301 Lower Susquehanna-Penns
02050302 Upper Juniata
02050303 Raystown
02050304 Lower Juniata
02050305 Lower Susquehanna-Swatara
02050306 Lower Susquehanna
0206 Upper Chesapeake
020600 Upper Chesapeake
02060001 Upper Chesapeake Bay
02060002 Chester-Sassafras
02060003 Gunpowder-Patapsco
02060004 Severn
02060005 Choptank
02060006 Patuxent
02060007 Blackwater-Wicomico
02060008 Nanticoke
02060009 Pocomoke
02060010 Chincoteague
0207 Potomac
020700 Potomac
02070001 South Branch Potomac
02070002 North Branch Potomac
02070003 Cacapon-Town
02070004 Conococheague-Opequon
02070005 South Fork Shenandoah
02070006 North Fork Shenandoah
02070007 Shenandoah
02070008 Middle Potomac-Catoctin
02070009 Monocacy
02070010 Middle Potomac-Anacostia-Occoquan
02070011 Lower Potomac
0208 Lower Chesapeake
020801 Lower Chesapeake
02080101 Lower Chesapeake Bay
02080102 Great Wicomico-Piankatank
02080103 Rapidan-Upper Rappahannock
02080104 Lower Rappahannock
02080105 Mattaponi
02080106 Pamunkey
02080107 York
02080108 Lynnhaven-Poquoson
02080109 Western Lower Delmarva
02080110 Eastern Lower Delmarva
020802 James
02080201 Upper James
02080202 Maury
02080203 Middle James-Buffalo
02080204 Rivanna
02080205 Middle James-Willis
02080206 Lower James
02080207 Appomattox
02080208 Hampton Roads
03 South Atlantic-Gulf
0301 Chowan-Roanoke
030101 Roanoke
03010101 Upper Roanoke
03010102 Middle Roanoke
03010103 Upper Dan
03010104 Lower Dan
03010105 Banister
03010106 Roanoke Rapids
03010107 Lower Roanoke
030102 Albemarle-Chowan
03010201 Nottoway
03010202 Blackwater
03010203 Chowan
03010204 Meherrin
03010205 Albemarle
0302 Neuse-Pamlico
030201 Pamlico
03020101 Upper Tar
03020102 Fishing
03020103 Lower Tar
03020104 Pamlico
03020105 Pamlico Sound
03020106 Bogue-Core Sounds
030202 Neuse
03020201 Upper Neuse
03020202 Middle Neuse
03020203 Contentnea
03020204 Lower Neuse
0303 Cape Fear
030300 Cape Fear
03030001 New
03030002 Haw
03030003 Deep
03030004 Upper Cape Fear
03030005 Lower Cape Fear
03030006 Black
03030007 Northeast Cape Fear
0304 Pee Dee
030401 Upper Pee Dee
03040101 Upper Yadkin
03040102 South Yadkin
03040103 Lower Yadkin
03040104 Upper Pee Dee
03040105 Rocky
030402 Lower Pee Dee
03040201 Lower Pee Dee
03040202 Lynches
03040203 Lumber
03040204 Little Pee Dee
03040205 Black
03040206 Waccamaw
03040207 Carolina Coastal-Sampit
0305 Edisto-Santee
030501 Santee
03050101 Upper Catawba
03050102 South Fork Catawba
03050103 Lower Catawba
03050104 Wateree
03050105 Upper Broad
03050106 Lower Broad
03050107 Tyger
03050108 Enoree
03050109 Saluda
03050110 Congaree
03050111 Lake Marion
03050112 Santee
030502 Edisto-South Carolina Coastal
03050201 Cooper
03050202 South Carolina Coastal
03050203 North Fork Edisto
03050204 South Fork Edisto
03050205 Edisto
03050206 Four Hole Swamp
03050207 Salkehatchie
03050208 Broad-St. Helena
0306 Ogeechee-Savannah
030601 Savannah
03060101 Seneca
03060102 Tugaloo
03060103 Upper Savannah
03060104 Broad
03060105 Little
03060106 Middle Savannah
03060107 Stevens
03060108 Brier
03060109 Lower Savannah
030602 Ogeechee
03060201 Upper Ogeechee
03060202 Lower Ogeechee
03060203 Canoochee
03060204 Ogeechee Coastal
0307 Altamaha - St. Marys
030701 Altamaha
03070101 Upper Oconee
03070102 Lower Oconee
03070103 Upper Ocmulgee
03070104 Lower Ocmulgee
03070105 Little Ocmulgee
03070106 Altamaha
03070107 Ohoopee
030702 St. Marys - Satilla
03070201 Satilla
03070202 Little Satilla
03070203 Cumberland-St. Simons
03070204 St. Marys
03070205 Nassau
0308 St. Johns
030801 St. Johns
03080101 Upper St. Johns
03080102 Oklawaha
03080103 Lower St. Johns
030802 East Florida Coastal
03080201 Daytona - St. Augustine
03080202 Cape Canaveral
03080203 Vero Beach
0309 Southern Florida
030901 Kissimmee
03090101 Kissimmee
03090102 Northern Okeechobee Inflow
03090103 Western Okeechobee Inflow
030902 Southern Florida
03090201 Lake Okeechobee
03090202 Everglades
03090203 Florida Bay-Florida Keys
03090204 Big Cypress Swamp
03090205 Caloosahatchee
0310 Peace-Tampa Bay
031001 Peace
03100101 Peace
03100102 Myakka
03100103 Charlotte Harbor
031002 Tampa Bay
03100201 Sarasota Bay
03100202 Manatee
03100203 Little Manatee
03100204 Alafia
03100205 Hillsborough
03100206 Tampa Bay
03100207 Crystal-Pithlachascotee
03100208 Withlacoochee
0311 Suwannee
031101 Aucilla-Waccasassa
03110101 Waccasassa
03110102 Econfina-Steinhatchee
03110103 Aucilla
031102 Suwannee
03110201 Upper Suwannee
03110202 Alapaha
03110203 Withlacoochee
03110204 Little
03110205 Lower Suwannee
03110206 Santa Fe
0312 Ochlockonee
031200 Ochlockonee. Georgia
03120001 Apalachee Bay-St. Marks
03120002 Upper Ochlockonee
03120003 Lower Ochlockonee
0313 Apalachicola
031300 Apalachicola
03130001 Upper Chattahoochee
03130002 Middle Chattahoochee-Lake Harding
03130003 Middle Chattahoochee-Walter F. George Reservoir
03130004 Lower Chattahoochee
03130005 Upper Flint
03130006 Middle Flint
03130007 Kinchafoonee-Muckalee
03130008 Lower Flint
03130009 Ichawaynochaway
03130010 Spring
03130011 Apalachicola
03130012 Chipola
03130013 New
03130014 Apalachicola Bay
0314 Choctawhatchee - Escambia
031401 Florida Panhandle Coastal
03140101 St. Andrew-St. Joseph Bays
03140102 Choctawhatchee Bay
03140103 Yellow
03140104 Blackwater
03140105 Pensacola Bay
03140106 Perdido
03140107 Perdido Bay
031402 Choctawhatchee
03140201 Upper Choctawhatchee
03140202 Pea
03140203 Lower Choctawhatchee
031403 Escambia
03140301 Upper Conecuh
03140302 Patsaliga
03140303 Sepulga
03140304 Lower Conecuh
03140305 Escambia
0315 Alabama
031501 Coosa-Tallapoosa
03150101 Conasauga
03150102 Coosawattee
03150103 Oostanaula
03150104 Etowah
03150105 Upper Coosa
03150106 Middle Coosa
03150107 Lower Coosa
03150108 Upper Tallapoosa
03150109 Middle Tallapoosa
03150110 Lower Tallapoosa
031502 Alabama
03150201 Upper Alabama
03150202 Cahaba
03150203 Middle Alabama
03150204 Lower Alabama
0316 Mobile - Tombigbee
031601 Black Warrior - Tombigbee
03160101 Upper Tombigbee
03160102 Town
03160103 Buttahatchee
03160104 Tibbee
03160105 Luxapallila
03160106 Middle Tombigbee-Lubbub
03160107 Sipsey
03160108 Noxubee
03160109 Mulberry
03160110 Sipsey Fork
03160111 Locust
03160112 Upper Black Warrior
03160113 Lower Black Warrior
031602 Mobile Bay- Tombigbee
03160201 Middle Tombigbee-Chickasaw
03160202 Sucarnoochee
03160203 Lower Tombigbee
03160204 Mobile - Tensaw
03160205 Mobile Bay
0317 Pascagoula
031700 Pascagoula. Mississippi
03170001 Chunky-Okatibbee
03170002 Upper Chickasawhay
03170003 Lower Chickasawhay
03170004 Upper Leaf
03170005 Lower Leaf
03170006 Pascagoula
03170007 Black
03170008 Escatawpa
03170009 Mississippi Coastal
0318 Pearl
031800 Pearl
03180001 Upper Pearl
03180002 Middle Pearl-Strong
03180003 Middle Pearl-Silver
03180004 Lower Pearl. Mississippi
03180005 Bogue Chitto
04 Great Lakes
0401 Western Lake Superior
040101 Northwestern Lake Superior
04010101 Baptism-Brule
04010102 Beaver-Lester
040102 St. Louis
04010201 St. Louis
04010202 Cloquet
040103 Southwestern Lake Superior
04010301 Beartrap-Nemadji
04010302 Bad-Montreal
0402 Southern Lake Superior-Lake Superior
040201 Southcentral Lake Superior
04020101 Black-Presque Isle
04020102 Ontonagon
04020103 Keweenaw Peninsula
04020104 Sturgeon
04020105 Dead-Kelsey
040202 Southeastern Lake Superior
04020201 Betsy-Chocolay
04020202 Tahquamenon
04020203 Waiska
040203 Lake Superior
04020300 Lake Superior
0403 Northwestern Lake Michigan
040301 Northwestern Lake Michigan
04030101 Manitowoc-Sheboygan
04030102 Door-Kewaunee
04030103 Duck-Pensaukee
04030104 Oconto
04030105 Peshtigo
04030106 Brule
04030107 Michigamme
04030108 Menominee
04030109 Cedar-Ford
04030110 Escanaba
04030111 Tacoosh-Whitefish
04030112 Fishdam-Sturgeon
040302 Fox
04030201 Upper Fox
04030202 Wolf
04030203 Lake Winnebago
04030204 Lower Fox
0404 Southwestern Lake Michigan
040400 Southwestern Lake Michigan
04040001 Little Calumet-Galien
04040002 Pike-Root
04040003 Milwaukee
0405 Southeastern Lake Michigan
040500 Southeastern Lake Michigan
04050001 St. Joseph
04050002 Black-Macatawa
04050003 Kalamazoo
04050004 Upper Grand
04050005 Maple
04050006 Lower Grand
04050007 Thornapple
0406 Northeastern Lake Michigan-Lake Michigan
040601 Northeastern Lake Michigan
04060101 Pere Marquette-White
04060102 Muskegon
04060103 Manistee
04060104 Betsie-Platte
04060105 Boardman-Charlevoix
04060106 Manistique
04060107 Brevoort-Millecoquins
040602 Lake Michigan
04060200 Lake Michigan
0407 Northwestern Lake Huron
040700 Northwestern Lake Huron
04070001 St. Marys
04070002 Carp-Pine
04070003 Lone Lake-Ocqueoc
04070004 Cheboygan
04070005 Black
04070006 Thunder Bay
04070007 Au Sable
0408 Southwestern Lake Huron-Lake Huron
040801 Southwestern Lake Huron
04080101 Au Gres-Rifle
04080102 Kawkawlin-Pine
04080103 Pigeon-Wiscoggin
04080104 Birch-Willow
040802 Saginaw
04080201 Tittabawassee
04080202 Pine
04080203 Shiawassee
04080204 Flint
04080205 Cass
04080206 Saginaw
040803 Lake Huron
04080300 Lake Huron
0409 St. Clair-Detroit
040900 St. Clair-Detroit
04090001 St. Clair
04090002 Lake St. Clair
04090003 Clinton
04090004 Detroit
04090005 Huron
0410 Western Lake Erie
041000 Western Lake Erie
04100001 Ottawa-Stony
04100002 Raisin
04100003 St. Joseph
04100004 St. Marys
04100005 Upper Maumee
04100006 Tiffin
04100007 Auglaize
04100008 Blanchard
04100009 Lower Maumee
04100010 Cedar-Portage
04100011 Sandusky
04100012 Huron-Vermilion
0411 Southern Lake Erie
041100 Southern Lake Erie
04110001 Black-Rocky
04110002 Cuyahoga
04110003 Ashtabula-Chagrin
04110004 Grand
0412 Eastern Lake Erie-Lake Erie
041201 Eastern Lake Erie
04120101 Chautauqua-Conneaut
04120102 Cattaraugus
04120103 Buffalo-Eighteenmile
04120104 Niagara
041202 Lake Erie
04120200 Lake Erie
0413 Southwestern Lake Ontario
041300 Southwestern Lake Ontario
04130001 Oak Orchard-Twelvemile
04130002 Upper Genesee
04130003 Lower Genesee
0414 Southeastern Lake Ontario
041401 Southeastern Lake Ontario
04140101 Irondequoit-Ninemile
04140102 Salmon-Sandy
041402 Oswego
04140201 Seneca
04140202 Oneida
04140203 Oswego
0415 Northeastern Lake Ontario-Lake Ontario-St. Lawrence
041501 Northeastern Lake Ontario
04150101 Black
04150102 Chaumont-Perch
041502 Lake Ontario
04150200 Lake Ontario
041503 St. Lawrence
04150301 Upper St. Lawrence
04150302 Oswegatchie
04150303 Indian
04150304 Grass
04150305 Raquette
04150306 St. Regis
04150307 English-Salmon
05 Ohio
0501 Allegheny
050100 Allegheny
05010001 Upper Allegheny
05010002 Conewango
05010003 Middle Allegheny-Tionesta
05010004 French
05010005 Clarion
05010006 Middle Allegheny-Redbank
05010007 Conemaugh
05010008 Kiskiminetas
05010009 Lower Allegheny
0502 Monongahela
050200 Monongahela
05020001 Tygart Valley
05020002 West Fork
05020003 Upper Monongahela
05020004 Cheat
05020005 Lower Monongahela
05020006 Youghiogheny
0503 Upper Ohio
050301 Upper Ohio-Beaver
05030101 Upper Ohio
05030102 Shenango
05030103 Mahoning
05030104 Beaver
05030105 Connoquenessing
05030106 Upper Ohio-Wheeling
050302 Upper Ohio-Little Kanawha
05030201 Little Muskingum-Middle Island
05030202 Upper Ohio-Shade
05030203 Little Kanawha
05030204 Hocking
0504 Muskingum
050400 Muskingum
05040001 Tuscarawas
05040002 Mohican
05040003 Walhonding
05040004 Muskingum
05040005 Wills
05040006 Licking
0505 Kanawha
050500 Kanawha
05050001 Upper New
05050002 Middle New
05050003 Greenbrier
05050004 Lower New
05050005 Gauley
05050006 Upper Kanawha
05050007 Elk
05050008 Lower Kanawha
05050009 Coal
0506 Scioto
050600 Scioto
05060001 Upper Scioto
05060002 Lower Scioto
05060003 Paint
0507 Big Sandy-Guyandotte
050701 Guyandotte
05070101 Upper Guyandotte
05070102 Lower Guyandotte
050702 Big Sandy
05070201 Tug
05070202 Upper Levisa
05070203 Lower Levisa
05070204 Big Sandy
0508 Great Miami
050800 Great Miami
05080001 Upper Great Miami
05080002 Lower Great Miami
05080003 Whitewater
0509 Middle Ohio
050901 Middle Ohio-Raccoon
05090101 Raccoon-Symmes
05090102 Twelvepole
05090103 Little Scioto-Tygarts
05090104 Little Sandy
050902 Middle Ohio-Little Miami
05090201 Ohio Brush-Whiteoak
05090202 Little Miami
05090203 Middle Ohio-Laughery
0510 Kentucky-Licking
051001 Licking
05100101 Licking
05100102 South Fork Licking
051002 Kentucky
05100201 North Fork Kentucky
05100202 Middle Fork Kentucky
05100203 South Fork Kentucky
05100204 Upper Kentucky
05100205 Lower Kentucky
0511 Green
051100 Green
05110001 Upper Green
05110002 Barren
05110003 Middle Green
05110004 Rough
05110005 Lower Green
05110006 Pond
0512 Wabash
051201 Wabash
05120101 Upper Wabash
05120102 Salamonie
05120103 Mississinewa
05120104 Eel
05120105 Middle Wabash-Deer
05120106 Tippecanoe
05120107 Wildcat
05120108 Middle Wabash-Little Vermilion
05120109 Vermilion
05120110 Sugar
05120111 Middle Wabash-Busseron
05120112 Embarras
05120113 Lower Wabash
05120114 Little Wabash
05120115 Skillet
051202 Patoka-White
05120201 Upper White
05120202 Lower White
05120203 Eel
05120204 Driftwood
05120205 Flatrock-Haw
05120206 Upper East Fork White
05120207 Muscatatuck
05120208 Lower East Fork White
05120209 Patoka
0513 Cumberland
051301 Upper Cumberland
05130101 Upper Cumberland
05130102 Rockcastle
05130103 Upper Cumberland-Lake Cumberland
05130104 South Fork Cumberland
05130105 Obey
05130106 Upper Cumberland-Cordell Hull
05130107 Collins
05130108 Caney
051302 Lower Cumberland
05130201 Lower Cumberland-Old Hickory Lake
05130202 Lower Cumberland-Sycamore
05130203 Stones
05130204 Harpeth
05130205 Lower Cumberland
05130206 Red
0514 Lower Ohio
051401 Lower Ohio-Salt
05140101 Silver-Little Kentucky
05140102 Salt
05140103 Rolling Fork
05140104 Blue-Sinking
051402 Lower Ohio
05140201 Lower Ohio-Little Pigeon
05140202 Highland-Pigeon
05140203 Lower Ohio-Bay
05140204 Saline
05140205 Tradewater
05140206 Lower Ohio
06 Tennessee
0601 Upper Tennessee
060101 French Broad-Holston
06010101 North Fork Holston
06010102 South Fork Holston
06010103 Watauga
06010104 Holston
06010105 Upper French Broad
06010106 Pigeon
06010107 Lower French Broad
06010108 Nolichucky
060102 Upper Tennessee
06010201 Watts Bar Lake
06010202 Upper Little Tennessee
06010203 Tuckasegee
06010204 Lower Little Tennessee
06010205 Upper Clinch
06010206 Powell
06010207 Lower Clinch
06010208 Emory
0602 Middle Tennessee-Hiwassee
060200 Middle Tennessee-Hiwassee
06020001 Middle Tennessee-Chickamauga
06020002 Hiwassee
06020003 Ocoee
06020004 Sequatchie
0603 Middle Tennessee-Elk
060300 Middle Tennessee-Elk
06030001 Guntersville Lake
06030002 Wheeler Lake
06030003 Upper Elk
06030004 Lower Elk
06030005 Pickwick Lake
06030006 Bear
0604 Lower Tennessee
060400 Lower Tennessee
06040001 Lower Tennessee-Beech
06040002 Upper Duck
06040003 Lower Duck
06040004 Buffalo
06040005 Kentucky Lake
06040006 Lower Tennessee
07 Upper Mississippi
0701 Mississippi Headwaters
070101 Mississippi Headwaters
07010101 Mississippi Headwaters
07010102 Leech Lake
07010103 Prairie-Willow
07010104 Elk-Nokasippi
07010105 Pine
07010106 Crow Wing
07010107 Redeye
07010108 Long Prairie
070102 Upper Mississippi-Crow-Rum
07010201 Platte-Spunk
07010202 Sauk
07010203 Clearwater-Elk
07010204 Crow
07010205 South Fork Crow
07010206 Twin Cities
07010207 Rum
0702 Minnesota
070200 Minnesota
07020001 Upper Minnesota
07020002 Pomme De Terre
07020003 Lac Qui Parle
07020004 Hawk-Yellow Medicine
07020005 Chippewa
07020006 Redwood
07020007 Middle Minnesota
07020008 Cottonwood
07020009 Blue Earth
07020010 Watonwan
07020011 Le Sueur
07020012 Lower Minnesota
0703 St. Croix
070300 St. Croix
07030001 Upper St. Croix
07030002 Namekagon
07030003 Kettle
07030004 Snake
07030005 Lower St. Croix
0704 Upper Mississippi-Black-Root
070400 Upper Mississippi-Black-Root
07040001 Rush-Vermillion
07040002 Cannon
07040003 Buffalo-Whitewater
07040004 Zumbro
07040005 Trempealeau
07040006 La Crosse-Pine
07040007 Black
07040008 Root
0705 Chippewa
070500 Chippewa
07050001 Upper Chippewa
07050002 Flambeau
07050003 South Fork Flambeau
07050004 Jump
07050005 Lower Chippewa
07050006 Eau Claire
07050007 Red Cedar
0706 Upper Mississippi-Maquoketa-Plum
070600 Upper Mississippi-Maquoketa-Plum
07060001 Coon-Yellow
07060002 Upper Iowa
07060003 Grant-Little Maquoketa
07060004 Turkey
07060005 Apple-Plum
07060006 Maquoketa
0707 Wisconsin
070700 Wisconsin
07070001 Upper Wisconsin
07070002 Lake Dubay
07070003 Castle Rock
07070004 Baraboo
07070005 Lower Wisconsin
07070006 Kickapoo
0708 Upper Mississippi-Iowa-Skunk-Wapsipinicon
070801 Upper Mississippi-Skunk-Wapsipinicon
07080101 Copperas-Duck
07080102 Upper Wapsipinicon
07080103 Lower Wapsipinicon
07080104 Flint-Henderson
07080105 South Skunk
07080106 North Skunk
07080107 Skunk
070802 Iowa
07080201 Upper Cedar
07080202 Shell Rock
07080203 Winnebago
07080204 West Fork Cedar
07080205 Middle Cedar
07080206 Lower Cedar
07080207 Upper Iowa
07080208 Middle Iowa
07080209 Lower Iowa
0709 Rock
070900 Rock
07090001 Upper Rock
07090002 Crawfish
07090003 Pecatonica
07090004 Sugar
07090005 Lower Rock
07090006 Kishwaukee
07090007 Green
0710 Des Moines
071000 Des Moines
07100001 Des Moines Headwaters
07100002 Upper Des Moines
07100003 East Fork Des Moines
07100004 Middle Des Moines
07100005 Boone
07100006 North Raccoon
07100007 South Raccoon
07100008 Lake Red Rock
07100009 Lower Des Moines
0711 Upper Mississippi-Salt
071100 Upper Mississippi-Salt
07110001 Bear-Wyaconda
07110002 North Fabius
07110003 South Fabius
07110004 The Sny
07110005 North Fork Salt
07110006 South Fork Salt
07110007 Salt
07110008 Cuivre
07110009 Peruque-Piasa
0712 Upper Illinois
071200 Upper Illinois
07120001 Kankakee
07120002 Iroquois
07120003 Chicago
07120004 Des Plaines
07120005 Upper Illinois
07120006 Upper Fox
07120007 Lower Fox
0713 Lower Illinois
071300 Lower Illinois
07130001 Lower Illinois-Senachwine Lake
07130002 Vermilion
07130003 Lower Illinois-Lake Chautauqua
07130004 Mackinaw
07130005 Spoon
07130006 Upper Sangamon
07130007 South Fork Sangamon
07130008 Lower Sangamon
07130009 Salt
07130010 La Moine
07130011 Lower Illinois
07130012 Macoupin
0714 Upper Mississippi-Kaskaskia-Meramec
071401 Upper Mississippi-Meramec
07140101 Cahokia-Joachim
07140102 Meramec
07140103 Bourbeuse
07140104 Big
07140105 Upper Mississippi-Cape Girardeau
07140106 Big Muddy
07140107 Whitewater
07140108 Cache
071402 Kaskaskia
07140201 Upper Kaskaskia
07140202 Middle Kaskaskia
07140203 Shoal
07140204 Lower Kaskaskia
08 Lower Mississippi
0801 Lower Mississippi-Hatchie
080101 Lower Mississippi-Memphis
08010100 Lower Mississippi-Memphis
080102 Hatchie-Obion
08010201 Bayou De Chien-Mayfield
08010202 Obion
08010203 South Fork Obion
08010204 North Fork Forked Deer
08010205 South Fork Forked Deer
08010206 Forked Deer
08010207 Upper Hatchie
08010208 Lower Hatchie
08010209 Loosahatchie
08010210 Wolf
08010211 Horn Lake-Nonconnah
0802 Lower Mississippi - St. Francis
080201 Lower Mississippi-Helena
08020100 Lower Mississippi-Helena
080202 St. Francis
08020201 New Madrid-St. Johns
08020202 Upper St. Francis
08020203 Lower St. Francis
08020204 Little River Ditches
08020205 L'anguille
080203 Lower White
08020301 Lower White-Bayou Des Arc
08020302 Cache
08020303 Lower White
08020304 Big
080204 Lower Arkansas
08020401 Lower Arkansas
08020402 Bayou Meto
0803 Lower Mississippi - Yazoo
080301 Lower Mississippi-Greenville
08030100 Lower Mississippi-Greenville
080302 Yazoo
08030201 Little Tallahatchie
08030202 Tallahatchie
08030203 Yocona
08030204 Coldwater
08030205 Yalobusha
08030206 Upper Yazoo
08030207 Big Sunflower
08030208 Lower Yazoo
08030209 Deer-Steele
0804 Lower Red - Ouachita
080401 Upper Ouachita
08040101 Ouachita Headwaters
08040102 Upper Ouachita
08040103 Little Missouri
080402 Lower Ouachita
08040201 Lower Ouachita-Smackover
08040202 Lower Ouachita-Bayou De Loutre
08040203 Upper Saline
08040204 Lower Saline
08040205 Bayou Bartholomew
08040206 Bayou D'arbonne
08040207 Lower Ouachita
080403 Lower Red
08040301 Lower Red
08040302 Castor
08040303 Dugdemona
08040304 Little
08040305 Black
08040306 Bayou Cocodrie
0805 Boeuf-Tensas
080500 Boeuf-Tensas
08050001 Boeuf
08050002 Bayou Macon
08050003 Tensas
0806 Lower Mississippi - Big Black
080601 Lower Mississippi-Natchez
08060100 Lower Mississippi-Natchez
080602 Big Black - Homochitto
08060201 Upper Big Black
08060202 Lower Big Black
08060203 Bayou Pierre
08060204 Coles Creek
08060205 Homochitto
08060206 Buffalo
0807 Lower Mississippi-Lake Maurepas
080701 Lower Mississippi-Baton Rouge
08070100 Lower Mississippi-Baton Rouge
080702 Lake Maurepas
08070201 Bayou Sara-Thompson
08070202 Amite
08070203 Tickfaw
08070204 Lake Maurepas
08070205 Tangipahoa
080703 Lower Grand
08070300 Lower Grand
0808 Louisiana Coastal
080801 Atchafalaya - Vermilion
08080101 Atchafalaya
08080102 Bayou Teche
08080103 Vermilion
080802 Calcasieu - Mermentau
08080201 Mermentau Headwaters
08080202 Mermentau
08080203 Upper Calcasieu
08080204 Whisky Chitto
08080205 West Fork Calcasieu
08080206 Lower Calcasieu
0809 Lower Mississippi
080901 Lower Mississippi-New Orleans
08090100 Lower Mississippi-New Orleans
080902 Lake Pontchartrain
08090201 Liberty Bayou-Tchefuncta
08090202 Lake Pontchartrain
08090203 Eastern Louisiana Coastal
080903 Central Louisiana Coastal
08090301 East Central Louisiana Coastal
08090302 West Central Louisiana Coastal
09 Souris-Red-Rainy
0901 Souris
090100 Souris
09010001 Upper Souris
09010002 Des Lacs
09010003 Lower Souris
09010004 Willow
09010005 Deep
0902 Red
090201 Upper Red
09020101 Bois De Sioux
09020102 Mustinka
09020103 Otter Tail
09020104 Upper Red
09020105 Western Wild Rice
09020106 Buffalo
09020107 Elm-Marsh
09020108 Eastern Wild Rice
09020109 Goose
090202 Devils Lake-Sheyenne
09020201 Devils Lake
09020202 Upper Sheyenne
09020203 Middle Sheyenne
09020204 Lower Sheyenne
09020205 Maple
090203 Lower Red
09020301 Sandhill-Wilson
09020302 Red Lakes
09020303 Red Lake
09020304 Thief
09020305 Clearwater
09020306 Grand Marais-Red
09020307 Turtle
09020308 Forest
09020309 Snake
09020310 Park
09020311 Lower Red
09020312 Two Rivers
09020313 Pembina
09020314 Roseau
0903 Rainy
090300 Rainy
09030001 Rainy Headwaters
09030002 Vermilion
09030003 Rainy Lake
09030004 Upper Rainy
09030005 Little Fork
09030006 Big Fork
09030007 Rapid
09030008 Lower Rainy
09030009 Lake of the Woods
10 Missouri
1001 Saskatchewan
100100 Saskatchewan
10010001 Belly
10010002 St. Mary
1002 Missouri Headwaters
100200 Missouri Headwaters
10020001 Red Rock
10020002 Beaverhead
10020003 Ruby
10020004 Big Hole
10020005 Jefferson
10020006 Boulder
10020007 Madison
10020008 Gallatin
1003 Missouri-Marias
100301 Upper Missouri
10030101 Upper Missouri
10030102 Upper Missouri-Dearborn
10030103 Smith
10030104 Sun
10030105 Belt
100302 Marias
10030201 Two Medicine
10030202 Cut Bank
10030203 Marias
10030204 Willow
10030205 Teton
1004 Missouri-Musselshell
100401 Fort Peck Lake
10040101 Bullwhacker-Dog
10040102 Arrow
10040103 Judith
10040104 Fort Peck Reservoir
10040105 Big Dry
10040106 Little Dry
100402 Musselshell
10040201 Upper Musselshell
10040202 Middle Musselshell
10040203 Flatwillow
10040204 Box Elder
10040205 Lower Musselshell
1005 Milk
100500 Milk
10050001 Milk Headwaters
10050002 Upper Milk
10050003 Wild Horse Lake
10050004 Middle Milk
10050005 Big Sandy
10050006 Sage
10050007 Lodge
10050008 Battle
10050009 Peoples
10050010 Cottonwood
10050011 Whitewater
10050012 Lower Milk
10050013 Frenchman
10050014 Beaver
10050015 Rock
10050016 Porcupine
1006 Missouri-Poplar
100600 Missouri-Poplar
10060001 Prairie Elk-Wolf
10060002 Redwater
10060003 Poplar
10060004 West Fork Poplar
10060005 Charlie-Little Muddy
10060006 Big Muddy
10060007 Brush Lake closed basin
1007 Upper Yellowstone
100700 Upper Yellowstone
10070001 Yellowstone Headwaters
10070002 Upper Yellowstone
10070003 Shields
10070004 Upper Yellowstone-Lake Basin
10070005 Stillwater
10070006 Clarks Fork Yellowstone
10070007 Upper Yellowstone-Pompeys Pillar
10070008 Pryor
1008 Big Horn
100800 Big Horn
10080001 Upper Wind
10080002 Little Wind
10080003 Popo Agie
10080004 Muskrat
10080005 Lower Wind
10080006 Badwater
10080007 Upper Bighorn
10080008 Nowood
10080009 Greybull
10080010 Big Horn Lake
10080011 Dry
10080012 North Fork Shoshone
10080013 South Fork Shoshone
10080014 Shoshone
10080015 Lower Bighorn
10080016 Little Bighorn
1009 Powder-Tongue
100901 Tongue
10090101 Upper Tongue
10090102 Lower Tongue
100902 Powder
10090201 Middle Fork Powder
10090202 Upper Powder
10090203 South Fork Powder
10090204 Salt
10090205 Crazy Woman
10090206 Clear
10090207 Middle Powder
10090208 Little Powder
10090209 Lower Powder
10090210 Mizpah
1010 Lower Yellowstone
101000 Lower Yellowstone
10100001 Lower Yellowstone-Sunday
10100002 Big Porcupine
10100003 Rosebud
10100004 Lower Yellowstone
10100005 O'Fallon
1011 Missouri-Little Missouri
101101 Lake Sakakawea
10110101 Lake Sakakawea
10110102 Little Muddy
101102 Little Missouri
10110201 Upper Little Missouri
10110202 Boxelder
10110203 Middle Little Missouri
10110204 Beaver
10110205 Lower Little Missouri
1012 Cheyenne
101201 Cheyenne
10120101 Antelope
10120102 Dry Fork Cheyenne
10120103 Upper Cheyenne
10120104 Lance
10120105 Lightning
10120106 Angostura Reservoir
10120107 Beaver
10120108 Hat
10120109 Middle Cheyenne-Spring
10120110 Rapid
10120111 Middle Cheyenne-Elk
10120112 Lower Cheyenne
10120113 Cherry
101202 Belle Fourche
10120201 Upper Belle Fourche
10120202 Lower Belle Fourche
10120203 Redwater
1013 Missouri-Oahe
101301 Lake Oahe
10130101 Painted Woods-Square Butte
10130102 Upper Lake Oahe
10130103 Apple
10130104 Beaver
10130105 Lower Lake Oahe
10130106 West Missouri Coteau
101302 Cannonball-Heart-Knife
10130201 Knife
10130202 Upper Heart
10130203 Lower Heart
10130204 Upper Cannonball
10130205 Cedar
10130206 Lower Cannonball
101303 Grand-Moreau
10130301 North Fork Grand
10130302 South Fork Grand
10130303 Grand
10130304 South Fork Moreau
10130305 Upper Moreau
10130306 Lower Moreau
1014 Missouri-White
101401 Fort Randall Reservoir
10140101 Fort Randall Reservoir
10140102 Bad
10140103 Medicine Knoll
10140104 Medicine
10140105 Crow
101402 White
10140201 Upper White
10140202 Middle White
10140203 Little White
10140204 Lower White
1015 Niobrara
101500 Niobrara
10150001 Ponca
10150002 Niobrara Headwaters
10150003 Upper Niobrara
10150004 Middle Niobrara
10150005 Snake
10150006 Keya Paha
10150007 Lower Niobrara
1016 James
101600 James
10160001 James Headwaters
10160002 Pipestem
10160003 Upper James
10160004 Elm
10160005 Mud
10160006 Middle James
10160007 East Missouri Coteau
10160008 Snake
10160009 Turtle
10160010 North Big Sioux Coteau
10160011 Lower James
1017 Missouri-Big Sioux
101701 Lewis and Clark Lake
10170101 Lewis and Clark Lake
10170102 Vermillion
10170103 South Big Sioux Coteau
101702 Big Sioux
10170201 Middle Big Sioux Coteau
10170202 Upper Big Sioux
10170203 Lower Big Sioux
10170204 Rock
1018 North Platte
101800 North Platte
10180001 North Platte Headwaters
10180002 Upper North Platte
10180003 Pathfinder-Seminoe Reservoirs
10180004 Medicine Bow
10180005 Little Medicine Bow
10180006 Sweetwater
10180007 Middle North Platte-Casper
10180008 Glendo Reservoir
10180009 Middle North Platte-Scotts Bluff
10180010 Upper Laramie
10180011 Lower Laramie
10180012 Horse
10180013 Pumpkin
10180014 Lower North Platte
1019 South Platte
101900 South Platte
10190001 South Platte Headwaters
10190002 Upper South Platte
10190003 Middle South Platte-Cherry Creek
10190004 Clear
10190005 St. Vrain
10190006 Big Thompson
10190007 Cache La Poudre
10190008 Lone Tree-Owl
10190009 Crow
10190010 Kiowa
10190011 Bijou
10190012 Middle South Platte-Sterling
10190013 Beaver
10190014 Pawnee
10190015 Upper Lodgepole
10190016 Lower Lodgepole
10190017 Sidney Draw
10190018 Lower South Platte
1020 Platte
102001 Middle Platte
10200101 Middle Platte-Buffalo
10200102 Wood
10200103 Middle Platte-Prairie
102002 Lower Platte
10200201 Lower Platte-Shell
10200202 Lower Platte
10200203 Salt
1021 Loup
102100 Loup
10210001 Upper Middle Loup
10210002 Dismal
10210003 Lower Middle Loup
10210004 South Loup
10210005 Mud
10210006 Upper North Loup
10210007 Lower North Loup
10210008 Calamus
10210009 Loup
10210010 Cedar
1022 Elkhorn
102200 Elkhorn
10220001 Upper Elkhorn
10220002 North Fork Elkhorn
10220003 Lower Elkhorn
10220004 Logan
1023 Missouri-Little Sioux
102300 Missouri-Little Sioux
10230001 Blackbird-Soldier
10230002 Floyd
10230003 Little Sioux
10230004 Monona-<NAME>
10230005 Maple
10230006 Big Papillion-Mosquito
10230007 Boyer
1024 Missouri-Nishnabotna
102400 Missouri-Nishnabotna
10240001 Keg-Weeping Water
10240002 West Nishnabotna
10240003 East Nishnabotna
10240004 Nishnabotna
10240005 Tarkio-Wolf
10240006 Little Nemaha
10240007 South Fork Big Nemaha
10240008 Big Nemaha
10240009 West Nodaway
10240010 Nodaway
10240011 Independence-Sugar
10240012 Platte
10240013 One Hundred and Two
1025 Republican
102500 Republican
10250001 Arikaree
10250002 North Fork Republican
10250003 South Fork Republican
10250004 Upper Republican
10250005 Frenchman
10250006 Stinking Water
10250007 Red Willow
10250008 Medicine
10250009 Harlan County Reservoir
10250010 Upper Sappa
10250011 Lower Sappa
10250012 South Fork Beaver
10250013 Little Beaver
10250014 Beaver
10250015 Prairie Dog
10250016 Middle Republican
10250017 Lower Republican
1026 Smoky Hill
102600 Smoky Hill
10260001 Smoky Hill Headwaters
10260002 North Fork Smoky Hill
10260003 Upper Smoky Hill
10260004 Ladder
10260005 Hackberry
10260006 Middle Smoky Hill
10260007 Big
10260008 Lower Smoky Hill
10260009 Upper Saline
10260010 Lower Saline
10260011 Upper North Fork Solomon
10260012 Lower North Fork Solomon
10260013 Upper South Fork Solomon
10260014 Lower South Fork Solomon
10260015 Solomon
1027 Kansas
102701 Kansas
10270101 Upper Kansas
10270102 Middle Kansas
10270103 Delaware
10270104 Lower Kansas
102702 Big Blue
10270201 Upper Big Blue
10270202 Middle Big Blue
10270203 West Fork Big Blue
10270204 Turkey
10270205 Lower Big Blue
10270206 Upper Little Blue
10270207 Lower Little Blue
1028 Chariton-Grand
102801 Grand
10280101 Upper Grand
10280102 Thompson
10280103 Lower Grand
102802 Chariton
10280201 Upper Chariton
10280202 Lower Chariton
10280203 Little Chariton
1029 Gasconade-Osage
102901 Osage
10290101 Upper Marais Des Cygnes
10290102 Lower Marais Des Cygnes
10290103 Little Osage
10290104 Marmaton
10290105 <NAME>
10290106 Sac
10290107 <NAME>
10290108 South Grand
10290109 Lake of the Ozarks
10290110 Niangua
10290111 Lower Osage
102902 Gasconade
10290201 Upper Gasconade
10290202 Big Piney
10290203 Lower Gasconade
1030 Lower Missouri
103001 Lower Missouri-Blackwater
10300101 Lower Missouri-Crooked
10300102 Lower Missouri-Moreau
10300103 Lamine
10300104 Blackwater
103002 Lower Missouri
10300200 Lower Missouri
11 Arkansas-White-Red
1101 Upper White
110100 Upper White
11010001 Beaver Reservoir
11010002 James
11010003 Bull Shoals Lake
11010004 Middle White
11010005 Buffalo
11010006 North Fork White
11010007 Upper Black
11010008 Current
11010009 Lower Black
11010010 Spring
11010011 Eleven Point
11010012 Strawberry
11010013 Upper White-Village
11010014 Little Red
1102 Upper Arkansas
110200 Upper Arkansas
11020001 Arkansas Headwaters
11020002 Upper Arkansas
11020003 Fountain
11020004 Chico
11020005 Upper Arkansas-Lake Meredith
11020006 Huerfano
11020007 Apishapa
11020008 Horse
11020009 Upper Arkansas-John Martin
11020010 Purgatoire
11020011 Big Sandy
11020012 Rush
11020013 Two Butte
1103 Middle Arkansas
110300 Middle Arkansas
11030001 Middle Arkansas-Lake Mckinney
11030002 Whitewoman
11030003 Arkansas-Dodge City
11030004 Coon-Pickerel
11030005 Pawnee
11030006 Buckner
11030007 Upper Walnut Creek
11030008 Lower Walnut Creek
11030009 Rattlesnake
11030010 Gar-Peace
11030011 Cow
11030012 Little Arkansas
11030013 Middle Arkansas-Slate
11030014 North Fork Ninnescah
11030015 South Fork Ninnescah
11030016 Ninnescah
11030017 Upper Walnut River
11030018 Lower Walnut River
1104 Upper Cimarron
110400 Upper Cimarron
11040001 Cimarron headwaters
11040002 Upper Cimarron
11040003 North Fork Cimarron
11040004 Sand Arroyo
11040005 Bear
11040006 Upper Cimarron-Liberal
11040007 Crooked
11040008 Upper Cimarron-Bluff
1105 Lower Cimarron
110500 Lower Cimarron
11050001 Lower Cimarron-Eagle Chief
11050002 Lower Cimarron-Skeleton
11050003 Lower Cimarron
1106 Arkansas - Keystone
110600 Arkansas - Keystone
11060001 Kaw Lake
11060002 Upper Salt Fork Arkansas
11060003 Medicine Lodge
11060004 Lower Salt Fork Arkansas
11060005 Chikaskia
11060006 Black Bear-Red Rock
1107 Neosho - Verdigris
110701 Verdigris
11070101 Upper Verdigris
11070102 Fall
11070103 Middle Verdigris
11070104 Elk
11070105 Lower Verdigris
11070106 Caney
11070107 Bird
110702 Neosho
11070201 Neosho headwaters
11070202 Upper Cottonwood
11070203 Lower Cottonwood
11070204 Upper Neosho
11070205 Middle Neosho
11070206 Lake O' the Cherokees
11070207 Spring
11070208 Elk
11070209 Lower Neosho
1108 Upper Canadian
110800 Upper Canadian
11080001 Canadian headwaters
11080002 Cimarron
11080003 Upper Canadian
11080004 Mora
11080005 Conchas
11080006 Upper Canadian-Ute Reservoir
11080007 Ute
11080008 Revuelto
1109 Lower Canadian
110901 Middle Canadian
11090101 Middle Canadian-Trujillo
11090102 Punta De Agua
11090103 <NAME>
11090104 Carrizo
11090105 Lake Meredith
11090106 Middle Canadian-Spring
110902 Lower Canadian
11090201 Lower Canadian-Deer
11090202 Lower Canadian-Walnut
11090203 Little
11090204 Lower Canadian
1110 North Canadian
111001 Upper Beaver
11100101 Upper Beaver
11100102 Middle Beaver
11100103 Coldwater
11100104 Palo Duro
111002 Lower Beaver
11100201 Lower Beaver
11100202 Upper Wolf
11100203 Lower Wolf
111003 Lower North Canadian
11100301 Middle North Canadian
11100302 Lower North Canadian
11100303 Deep Fork
1111 Lower Arkansas
111101 Robert S. Kerr Reservoir
11110101 Polecat-Snake
11110102 Dirty-Greenleaf
11110103 Illinois
11110104 Robert S. Kerr Reservoir
11110105 Poteau
111102 Lower Arkansas-Fourche La Fave
11110201 Frog-Mulberry
11110202 Dardanelle Reservoir
11110203 Lake Conway-Point Remove
11110204 <NAME>
11110205 Cadron
11110206 Fourche La Fave
11110207 Lower Arkansas-Maumelle
1112 Red headwaters
111201 Prairie Dog Town
import errno
import os
import logging
from logging.handlers import RotatingFileHandler
import re
from types import FunctionType
from sqlalchemy import create_engine
from sqlalchemy import and_, desc, or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from hathor.audio import metadata
from hathor.database.tables import BASE, Podcast
from hathor.database.tables import PodcastEpisode, PodcastTitleFilter
from hathor.exc import AudioFileException, HathorException
from hathor.podcast.archive import ARCHIVE_TYPES, ARCHIVE_KEYS
from hathor import settings, utils
FILE_PATH = os.path.abspath(__file__)
DIR_PATH = os.path.dirname(FILE_PATH)
PLUGIN_PATH = os.path.join(DIR_PATH, 'plugins')
REJECT_TAG_UPDATE_FILE_TYPES = ['.mp4', '.mkv']
def load_plugins():
skip_list = ['__init__.py']
plugin_list = []
# check plugin path for relevant py files
for dir_name, _, files in os.walk(PLUGIN_PATH):
for file_name in files:
if file_name in skip_list:
continue
if not file_name.endswith('.py'):
continue
# load py files
plugin_path = os.path.join(dir_name, file_name)
relative_path = os.path.join('hathor', os.path.relpath(plugin_path, DIR_PATH))
import_name = os.path.splitext(relative_path)[0]
# Convert path separators to dots (handles both *nix and windows)
import_name = import_name.replace(os.sep, ".")
# Now import
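# Note: __import__('a.b.c') returns the top-level package 'a', not the leaf
# module, so the attribute walk below is needed to reach the module itself
# (importlib.import_module would return the leaf module directly)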
imported = __import__(import_name)
for name in relative_path.split(os.sep)[1:]:
if name.endswith('.py'):
name = name[:-3]
imported = getattr(imported, name)
# needed this odd logic to load again, probably a better
# way to do this, but this works for now
for key, value in vars(imported).items():
if isinstance(value, FunctionType):
plugin_list.append((key, value))
return plugin_list
def run_plugins():
def decorator(func):
def caller(*args, **kwargs):
result = func(*args, **kwargs)
# Assume first arg called is "self"
selfie = args[0]
# Look through plugins
for plugin in selfie.plugins:
# Plugins will be (name, func obj)
if plugin[0] == func.__name__:
# Run plugin function with client class
# and result of original function
plugin_func = plugin[1]
result = plugin_func(selfie, result)
return result
return caller
return decorator
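# Example (hypothetical plugin): a file plugins/stats.py containing
#     def podcast_create(client, result):
#         client.logger.info("created podcast %s", result['name'])
#         return result
# would be picked up by load_plugins() and run automatically after
# HathorClient.podcast_create, receiving the client instance and the
# original return value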
def setup_logger(name, log_file_level, logging_file=None,
console_logging=True, console_logging_level=logging.INFO):
logger = logging.getLogger(name)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger.setLevel(log_file_level)
if logging_file is not None:
fh = RotatingFileHandler(logging_file,
backupCount=4,
maxBytes=((2 ** 20) * 10))
fh.setLevel(log_file_level)
fh.setFormatter(formatter)
logger.addHandler(fh)
if console_logging:
sh = logging.StreamHandler()
sh.setLevel(console_logging_level)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
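# Example usage (hypothetical path):
#     logger = setup_logger('hathor', logging.DEBUG, logging_file='/tmp/hathor.log')
# logs DEBUG and above to a rotating file while echoing INFO and above to the console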
def check_inputs(user_input):
if user_input is None:
return None, 'No input given'
# if not list, check is int
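# bool is a subclass of int in Python, so reject it explicitly before the int check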
if not isinstance(user_input, list):
if isinstance(user_input, bool):
return False, 'Input must be int type, %s given' % user_input
if not isinstance(user_input, int):
return False, 'Input must be int type, %s given' % user_input
user_input = [user_input]
else:
# if it is a list, check each item in list
for inputty in user_input:
if isinstance(inputty, bool):
return False, 'Input must be int type, %s given' % inputty
if not isinstance(inputty, int):
return False, 'Input must be int type, %s given' % inputty
return True, user_input
def check_arguement_type(value, types_allowed):
if not isinstance(types_allowed, list):
types_allowed = [types_allowed]
valid = False
for typer in types_allowed:
if typer is None:
if value is None:
valid = True
break
elif isinstance(value, typer):
valid = True
break
if not valid:
return False, '%s type given' % str(value.__class__.__name__)
return True, 'Valid input'
class HathorClient(object):
def __init__(self, podcast_directory=None, datetime_output_format=settings.DEFAULT_DATETIME_FORMAT,
logging_file=None, logging_file_level=logging.DEBUG,
database_file=None, soundcloud_client_id=None, google_api_key=None,
console_logging=True, console_logging_level=logging.INFO):
'''
Initialize the hathor client
podcast_directory : Directory where new podcasts will be placed by default
datetime_output_format : Python datetime output format
logging_file : Add logging handler for output file, will be rotational
logging_file_level : Level for file logging to use
database_file : Sqlite database to use, if None db will be stored in memory
soundcloud_client_id : Client id for accessing soundcloud API
google_api_key : Key for accessing google API for youtube
console_logging : Whether or not to set logging to console
console_logging_level : Level for console logging to use
'''
self.podcast_directory = None
if podcast_directory is not None:
self.podcast_directory = os.path.abspath(podcast_directory)
self.datetime_output_format = datetime_output_format
self.logger = setup_logger('hathor', logging_file_level, logging_file=logging_file,
console_logging=console_logging,
console_logging_level=console_logging_level)
if database_file is None:
engine = create_engine('sqlite:///', encoding='utf-8')
self.logger.debug("Initializing hathor client in memory (no database file given")
else:
engine = create_engine('sqlite:///%s' % database_file, encoding='utf-8')
self.logger.debug("Initializing hathor client with database file %s", database_file)
BASE.metadata.create_all(engine)
BASE.metadata.bind = engine
self.db_session = sessionmaker(bind=engine)()
if not soundcloud_client_id:
self.logger.debug("No soundcloud client id given, will not be able to access soundcloud api")
self.soundcloud_client_id = soundcloud_client_id
if not google_api_key:
self.logger.debug("No google api key given, will not be to able to access google api")
self.google_api_key = google_api_key
self.plugins = load_plugins()
def __exit__(self, _exc_type, _exc_value, _traceback):
self.db_session.close()
def _archive_manager(self, archive_type):
return ARCHIVE_TYPES[archive_type](self.logger,
self.soundcloud_client_id,
self.google_api_key)
def _database_select(self, table, given_input):
given_input = self._check_input(given_input)
if not given_input:
return []
return self.db_session.query(table).filter(table.id.in_(given_input))
def _fail(self, message):
self.logger.error(message)
raise HathorException(message)
def _check_argument_oneof(self, value, allowed_values, message):
if value not in allowed_values:
self._fail('%s - %s value given' % (message, value))
def _check_includers(self, include_args, exclude_args):
code, result = check_inputs(include_args)
if code is False:
self._fail(result)
elif code is True:
include_args = result
code, result = check_inputs(exclude_args)
if code is False:
self._fail(result)
elif code is True:
exclude_args = result
return include_args, exclude_args
def _check_input(self, user_input):
code, result = check_inputs(user_input)
if code is False:
self._fail(result)
elif code is True:
user_input = result
return user_input
def _check_arguement_type(self, user_input, types_allowed, message):
code, error_message = check_arguement_type(user_input, types_allowed)
if code is True:
return
else:
self._fail('%s - %s' % (message, error_message))
def _ensure_path(self, directory_path):
if not os.path.isdir(directory_path):
os.makedirs(directory_path)
self.logger.info("Created new directory:%s", directory_path)
def _remove_directory(self, directory_path):
try:
os.rmdir(directory_path)
self.logger.info("Removed directory:%s", directory_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
self.logger.warn("Unable to delete directory:%s, does not exist", directory_path)
else:
raise
def _remove_file(self, file_path):
try:
os.remove(file_path)
self.logger.info("Removed file:%s", file_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
self.logger.warn("Unable to delete file:%s, does not exist", file_path)
else:
raise
@run_plugins()
def podcast_create(self, archive_type, broadcast_id, podcast_name, max_allowed=None,
file_location=None, artist_name=None, automatic_download=True):
'''
Create new podcast
archive_type : Where podcast is downloaded from (rss/soundcloud/youtube)
broadcast_id : Identifier of podcast by archive_type, such as youtube channel ID
podcast_name : Name to identify podcast in database
max_allowed : When syncing the podcast, keep the last N episodes(if none keep all)
file_location : Where podcast files will be stored
artist_name : Name of artist to use when updating media file metadata
automatic_download : Automatically download new episodes with podcast sync
Returns: dict object representing created podcast
'''
self._check_arguement_type(podcast_name, str, 'Podcast name must be string type')
self._check_arguement_type(broadcast_id, str, 'Broadcast ID must be string type')
self._check_arguement_type(archive_type, str, 'Archive Type must be string type')
self._check_arguement_type(automatic_download, bool, 'Automatic download must be boolean type')
self._check_arguement_type(max_allowed, [None, int], 'Max allowed must be None or int type')
self._check_arguement_type(file_location, [None, str], 'File location must be None or string type')
self._check_arguement_type(artist_name, [None, str], 'Artist name must be None or string type')
self._check_argument_oneof(archive_type, ARCHIVE_KEYS, 'Archive Type must be in accepted list of keys')
if max_allowed is not None and max_allowed < 1:
self._fail('Max allowed must be positive integer, %s given' % max_allowed)
if file_location is None:
if self.podcast_directory is None:
self._fail("No default podcast directory specified, will need specific file location to create podcast")
file_location = os.path.join(self.podcast_directory, utils.normalize_name(podcast_name))
pod_args = {
'name' : utils.clean_string(podcast_name),
'archive_type' : archive_type,
'broadcast_id' : utils.clean_string(broadcast_id),
'max_allowed' : max_allowed,
'file_location' : os.path.abspath(file_location),
'artist_name' : utils.clean_string(artist_name),
'automatic_episode_download' : automatic_download,
}
new_pod = Podcast(**pod_args)
try:
self.db_session.add(new_pod)
self.db_session.commit()
self.logger.info("Podcast created in database, id:%d, args %s",
new_pod.id, ' -- '.join('%s-%s' % (k, v) for k, v in pod_args.items()))
except IntegrityError:
self.db_session.rollback()
self._fail('Cannot create podcast, name was %s' % pod_args['name'])
self.logger.debug("Ensuring podcast %d path exists %s", new_pod.id, file_location)
self._ensure_path(file_location)
return new_pod.as_dict(self.datetime_output_format)
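# Example usage (hypothetical identifiers):
#     client = HathorClient(podcast_directory='/podcasts', google_api_key='...')
#     pod = client.podcast_create('youtube', 'UCxxxx', 'My Show', max_allowed=10)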
@run_plugins()
def podcast_list(self):
'''
List all podcasts
Returns: List of dictionaries for all podcasts
'''
query = self.db_session.query(Podcast).all()
podcast_data = []
for podcast in query:
podcast_data.append(podcast.as_dict(self.datetime_output_format))
return podcast_data
@run_plugins()
def podcast_show(self, podcast_input):
'''
Get information on one or many podcasts
podcast_input : Either single integer id, or list of integer ids
Returns: List of dictionaries for podcasts requested
'''
query = self._database_select(Podcast, podcast_input)
podcast_data = []
for podcast in query:
podcast_data.append(podcast.as_dict(self.datetime_output_format))
return podcast_data
@run_plugins()
def podcast_update(self, podcast_id, podcast_name=None, broadcast_id=None, archive_type=None,
max_allowed=None, artist_name=None, automatic_download=None):
'''
Update a single podcast
podcast_id : ID of podcast to edit
archive_type : Where podcast is downloaded from (rss/soundcloud/youtube)
broadcast_id : Identifier of podcast by archive_type, such as youtube channel ID
podcast_name : Name to identify podcast in database
max_allowed : When syncing the podcast, keep the last N episodes. Set to 0 for unlimited
artist_name : Name of artist to use when updating media file metadata
automatic_download : Automatically download episodes with podcast sync
Returns: dict object representing updated podcast
'''
,
u'㾇' : [u'm'] ,
u'贐' : [u'j'] ,
u'丒' : [u'c'] ,
u'檕' : [u'x', u'j'] ,
u'垗' : [u'z'] ,
u'昢' : [u'p'] ,
u'芥' : [u'j', u'g'] ,
u'侧' : [u'c', u'z'] ,
u'鸲' : [u'q'] ,
u'嬴' : [u'y'] ,
u'枷' : [u'j'] ,
u'獄' : [u'y'] ,
u'壉' : [u'j'] ,
u'歔' : [u'x'] ,
u'呖' : [u'l'] ,
u'烙' : [u'l', u'g'] ,
u'荤' : [u'h', u'x'] ,
u'棩' : [u'y'] ,
u'嗫' : [u'n'] ,
u'摶' : [u'z', u't'] ,
u'胹' : [u'e'] ,
u'㐁' : [u't'] ,
u'鲆' : [u'p'] ,
u'妈' : [u'm'] ,
u'挏' : [u'd'] ,
u'熘' : [u'l'] ,
u'鬟' : [u'h'] ,
u'䐡' : [u'q'] ,
u'榨' : [u'z'] ,
u'㤳' : [u'b'] ,
u'膸' : [u's'] ,
u'䊺' : [u'h', u'g'] ,
u'鑁' : [u'z'] ,
u'元' : [u'y'] ,
u'竊' : [u'q'] ,
u'豑' : [u'z'] ,
u'鋚' : [u't'] ,
u'応' : [u'y'] ,
u'慣' : [u'g'] ,
u'諪' : [u't'] ,
u'矬' : [u'c'] ,
u'饳' : [u'd'] ,
u'婵' : [u'c'] ,
u'濼' : [u'p', u'b', u'l'] ,
u'弆' : [u'j'] ,
u'箉' : [u'g'] ,
u'萌' : [u'm'] ,
u'眖' : [u'k'] ,
u'鎙' : [u's'] ,
u'䊣' : [u'h'] ,
u'漦' : [u'l'] ,
u'让' : [u'r'] ,
u'㖭' : [u't'] ,
u'帰' : [u'g'] ,
u'窳' : [u'y'] ,
u'蜶' : [u's'] ,
u'癀' : [u'h'] ,
u'鋃' : [u'l'] ,
u'㥊' : [u'p'] ,
u'䗍' : [u'l'] ,
u'湐' : [u'm'] ,
u'諓' : [u'j'] ,
u'㓗' : [u'q', u'j'] ,
u'党' : [u'd'] ,
u'緝' : [u'q', u'j'] ,
u'虠' : [u'j'] ,
u'䥪' : [u'x'] ,
u'闭' : [u'b'] ,
u'䓷' : [u'x'] ,
u'慺' : [u'l'] ,
u'跽' : [u'j'] ,
u'傄' : [u'x'] ,
u'昋' : [u'g'] ,
u'馊' : [u's'] ,
u'䢔' : [u'h', u'j', u'g'] ,
u'鸛' : [u'q', u'h', u'g'] ,
u'㮞' : [u'y', u'j', u'n'] ,
u'䄥' : [u'l'] ,
u'悤' : [u'c'] ,
u'厮' : [u's'] ,
u'礵' : [u's'] ,
u'颴' : [u'x'] ,
u'䮾' : [u'p'] ,
u'酅' : [u'x'] ,
u'䁏' : [u'y'] ,
u'揎' : [u'x', u's'] ,
u'襕' : [u'l'] ,
u'勘' : [u'k'] ,
u'硟' : [u'c'] ,
u'鯞' : [u'z'] ,
u'䫨' : [u'a'] ,
u'遯' : [u'd'] ,
u'拸' : [u'y', u'c'] ,
u'衿' : [u'q', u'j'] ,
u'鬈' : [u'q'] ,
u'堊' : [u'e'] ,
u'撍' : [u'z'] ,
u'瀚' : [u'h'] ,
u'鲝' : [u'z'] ,
u'㴜' : [u'b'] ,
u'妟' : [u'y'] ,
u'株' : [u'z'] ,
u'唬' : [u'h'] ,
u'熯' : [u'h'] ,
u'㺱' : [u'r'] ,
u'耺' : [u'y'] ,
u'䴼' : [u'c'] ,
u'榿' : [u'q'] ,
u'囁' : [u'n'] ,
u'敌' : [u'd'] ,
u'臏' : [u'b'] ,
u'仑' : [u'l'] ,
u'鵜' : [u't'] ,
u'婞' : [u'x'] ,
u'曡' : [u'd'] ,
u'牮' : [u'j'] ,
u'黱' : [u'd'] ,
u'㽰' : [u's'] ,
u'寳' : [u'b'] ,
u'橾' : [u's'] ,
u'垀' : [u'h'] ,
u'紇' : [u'h', u'j', u'g'] ,
u'㨉' : [u'm'] ,
u'芎' : [u'q', u'x'] ,
u'侐' : [u'x'] ,
u'锗' : [u'z', u'd'] ,
u'则' : [u'z'] ,
u'货' : [u'h'] ,
u'䨩' : [u'l'] ,
u'㜫' : [u'm'] ,
u'岲' : [u'k'] ,
u'戹' : [u'a', u'e'] ,
u'瓂' : [u'g'] ,
u'驉' : [u'x'] ,
u'䝋' : [u'z'] ,
u'泒' : [u'g'] ,
u'罛' : [u'g'] ,
u'蓢' : [u'l'] ,
u'䇤' : [u's', u'r', u'd'] ,
u'靫' : [u'c'] ,
u'呭' : [u'y'] ,
u'秴' : [u'h'] ,
u'轻' : [u'q'] ,
u'禁' : [u'j'] ,
u'鸄' : [u'j'] ,
u'䴎' : [u'l'] ,
u'憑' : [u'p'] ,
u'蘔' : [u'j'] ,
u'㰘' : [u'y'] ,
u'傛' : [u'y', u'r'] ,
u'甞' : [u'c'] ,
u'覡' : [u'x'] ,
u'碫' : [u'd'] ,
u'鴮' : [u'w'] ,
u'悻' : [u'x'] ,
u'蔾' : [u'l'] ,
u'㭂' : [u'j'] ,
u'必' : [u'b'] ,
u'瑈' : [u'r'] ,
u'裋' : [u's'] ,
u'䟕' : [u'c'] ,
u'鱘' : [u'x'] ,
u'䭢' : [u'n'] ,
u'濥' : [u'y'] ,
u'葨' : [u'w'] ,
u'廯' : [u'x'] ,
u'獲' : [u'h'] ,
u'韵' : [u'y'] ,
u'䛿' : [u'g'] ,
u'簃' : [u'y'] ,
u'鮂' : [u'q'] ,
u'䪌' : [u'z'] ,
u'搓' : [u'c'] ,
u'莒' : [u'j'] ,
u'㦖' : [u'm'] ,
u'匝' : [u'z'] ,
u'犜' : [u'd'] ,
u'谣' : [u'y'] ,
u'笭' : [u'l'] ,
u'骬' : [u'y'] ,
u'䦶' : [u'z'] ,
u'挽' : [u'w'] ,
u'芼' : [u'm'] ,
u'㣀' : [u'z'] ,
u'則' : [u'z'] ,
u'燆' : [u'q'] ,
u'譍' : [u'y'] ,
u'穗' : [u's'] ,
u'駖' : [u'l'] ,
u'䣠' : [u'j', u't'] ,
u'执' : [u'z'] ,
u'臦' : [u'g'] ,
u'共' : [u'g'] ,
u'烰' : [u'f'] ,
u'詷' : [u't'] ,
u'餀' : [u'h'] ,
u'娂' : [u'h'] ,
u'纅' : [u'l'] ,
u'脐' : [u'q'] ,
u'䈒' : [u'n'] ,
u'暕' : [u'j', u'l'] ,
u'宗' : [u'z'] ,
u'樢' : [u'n'] ,
u'躥' : [u'c'] ,
u'䎧' : [u'p', u'b'] ,
u'鈲' : [u'g'] ,
u'圴' : [u'z'] ,
u'殷' : [u'y'] ,
u'罄' : [u'q'] ,
u'鏇' : [u'x'] ,
u'哉' : [u'z'] ,
u'杔' : [u't'] ,
u'塖' : [u'c'] ,
u'糙' : [u'c'] ,
u'轤' : [u'l'] ,
u'䁦' : [u'q'] ,
u'擩' : [u'r'] ,
u'姫' : [u'j'] ,
u'桶' : [u't'] ,
u'賹' : [u'a'] ,
u'䇻' : [u'h', u'k', u'w'] ,
u'邆' : [u't'] ,
u'喈' : [u'j'] ,
u'漏' : [u'l'] ,
u'綘' : [u'f'] ,
u'㺚' : [u't'] ,
u'霟' : [u'h'] ,
u'䠡' : [u'c'] ,
u'斨' : [u'q'] ,
u'鶴' : [u'h'] ,
u'瀱' : [u'j'] ,
u'㔳' : [u'h', u'j', u'g'] ,
u'趸' : [u'd'] ,
u'人' : [u'r'] ,
u'顁' : [u'd'] ,
u'嵃' : [u'y'] ,
u'益' : [u'y'] ,
u'聑' : [u'd'] ,
u'䕓' : [u'c'] ,
u'黚' : [u'q'] ,
u'叜' : [u's'] ,
u'浣' : [u'h', u'g', u'w'] ,
u'蛪' : [u'q'] ,
u'篬' : [u'q'] ,
u'㳮' : [u'n'] ,
u'镳' : [u'b'] ,
u'噵' : [u'd'] ,
u'㺃' : [u'g'] ,
u'匆' : [u'c'] ,
u'瞉' : [u'k'] ,
u'蠌' : [u'z'] ,
u'笖' : [u'y'] ,
u'龙' : [u'm', u'l'] ,
u'亣' : [u't'] ,
u'挦' : [u'x'] ,
u'㦭' : [u'l'] ,
u'到' : [u'd'] ,
u'皳' : [u'q'] ,
u'謶' : [u'z'] ,
u'穀' : [u'g', u'n'] ,
u'黃' : [u'h'] ,
u'䧍' : [u'x'] ,
u'扐' : [u'l'] ,
u'蛓' : [u'c'] ,
u'嵚' : [u'q'] ,
u'詠' : [u'y'] ,
u'䕪' : [u'z'] ,
u'駭' : [u'h'] ,
u'浺' : [u'c'] ,
u'臽' : [u'x'] ,
u'岄' : [u'y'] ,
u'樋' : [u't'] ,
u'䒔' : [u'b'] ,
u'鈛' : [u'g'] ,
u'沤' : [u'o'] ,
u'㰯' : [u'k'] ,
u'微' : [u'w'] ,
u'电' : [u'd'] ,
u'钴' : [u'g'] ,
u'䞾' : [u'c'] ,
u'鵅' : [u'l'] ,
u'䱏' : [u't'] ,
u'濎' : [u'd', u't'] ,
u'蕕' : [u'y'] ,
u'㽙' : [u'j'] ,
u'廘' : [u'l'] ,
u'瑟' : [u's'] ,
u'韞' : [u'y', u'w'] ,
u'䛨' : [u'x'] ,
u'鱯' : [u'h'] ,
u'佹' : [u'g'] ,
u'滸' : [u'h', u'x'] ,
u'葿' : [u'm'] ,
u'霈' : [u'p'] ,
u'吊' : [u'd'] ,
u'梍' : [u'z'] ,
u'猪' : [u'z'] ,
u'簚' : [u'm'] ,
u'邝' : [u'k'] ,
u'喟' : [u'k'] ,
u'搪' : [u't'] ,
u'夬' : [u'g'] ,
u'綯' : [u'k', u't'] ,
u'谺' : [u'x'] ,
u'䄼' : [u't'] ,
u'斿' : [u'y', u'l'] ,
u'嫁' : [u'j'] ,
u'楌' : [u'y'] ,
u'跏' : [u'j'] ,
u'䋑' : [u'b'] ,
u'噞' : [u'y'] ,
u'櫡' : [u'z'] ,
u'繮' : [u'j'] ,
u'鋱' : [u't'] ,
u'埳' : [u'k'] ,
u'晾' : [u'l'] ,
u'宀' : [u'm'] ,
u'焇' : [u'x'] ,
u'㘉' : [u'z'] ,
u'躎' : [u'n'] ,
u'䎐' : [u'c'] ,
u'餗' : [u's'] ,
u'帙' : [u'z'] ,
u'殠' : [u'c'] ,
u'脧' : [u'z', u'j'] ,
u'䘩' : [u'x'] ,
u'㬫' : [u'y'] ,
u'鎰' : [u'y'] ,
u'傲' : [u'a'] ,
u'渹' : [u'q', u'h'] ,
u'磂' : [u'l'] ,
u'㷄' : [u'h'] ,
u'陉' : [u'x', u'j'] ,
u'䭋' : [u'b'] ,
u'惒' : [u'h'] ,
u'獛' : [u'p'] ,
u'裢' : [u'l'] ,
u'魫' : [u's'] ,
u'塭' : [u'w'] ,
u'痴' : [u'c'] ,
u'荻' : [u'd'] ,
u'䁽' : [u'l'] ,
u'綁' : [u'b'] ,
u'騄' : [u'l'] ,
u'䤎' : [u'j'] ,
u'斑' : [u'b'] ,
u'舔' : [u't'] ,
u'㠘' : [u'y'] ,
u'咛' : [u'n'] ,
u'焞' : [u't'] ,
u'趡' : [u'c'] ,
u'餮' : [u't'] ,
u'撻' : [u't'] ,
u'脾' : [u'p', u'b'] ,
u'㽂' : [u's'] ,
u'寅' : [u'y'] ,
u'灈' : [u'q'] ,
u'賋' : [u'j'] ,
u'䏕' : [u'r'] ,
u'願' : [u'y'] ,
u'佢' : [u'q'] ,
u'毥' : [u'x'] ,
u'㹬' : [u's'] ,
u'嫯' : [u'a'] ,
u'睲' : [u'x'] ,
u'鏵' : [u'h'] ,
u'砃' : [u'd'] ,
u'龂' : [u'y', u'k'] ,
u'二' : [u'e'] ,
u'怓' : [u'n'] ,
u'螒' : [u'h'] ,
u'㶖' : [u's'] ,
u'圝' : [u'l'] ,
u'皜' : [u'h', u'g'] ,
u'蠣' : [u'l'] ,
u'缭' : [u'l'] ,
u'麬' : [u'f'] ,
u'朽' : [u'x'] ,
u'蚼' : [u'g'] ,
u'㳀' : [u'x', u'k', u'g'] ,
u'噇' : [u'c'] ,
u'痆' : [u'n'] ,
u'轍' : [u'c', u'z'] ,
u'繗' : [u'l'] ,
u'鷖' : [u'y'] ,
u'䳠' : [u'c', u'r', u'z'] ,
u'晧' : [u'h'] ,
u'藦' : [u'm'] ,
u'啱' : [u'y'] ,
u'蹷' : [u'j'] ,
u'鴀' : [u'f'] ,
u'市' : [u's'] ,
u'窅' : [u'y'] ,
u'㞇' : [u'w'] ,
u'蔐' : [u'd'] ,
u'投' : [u't'] ,
u'得' : [u'd'] ,
u'渢' : [u'f'] ,
u'誥' : [u'g'] ,
u'䞧' : [u'h'] ,
u'防' : [u'f'] ,
u'匴' : [u's'] ,
u'澷' : [u'm'] ,
u'筄' : [u'y'] ,
u'韇' : [u'd'] ,
u'僉' : [u'q'] ,
u'捔' : [u'j'] ,
u'屖' : [u'x'] ,
u'磙' | |
)
cbar.mappable.set_clim(clim)
cbar.set_label(clabel, fontweight="bold")
# AR our tileid
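# AR wrap the tile RA into [-180, 180] around the plot origin, to pick
# AR which side the annotation arrow comes from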
tmpra = np.remainder(tilera + 360 - org, 360)
if tmpra > 180:
tmpra -= 360
if tmpra > 0:
dra = -40
else:
dra = 40
ramws, decmws = get_radec_mw(
np.array([tilera, tilera + dra]), np.array([tiledec, tiledec - 30]), org
)
ax.scatter(
ramws[0],
decmws[0],
edgecolors="k",
facecolors="none",
marker="o",
s=50,
zorder=1,
)
arrow_args = dict(color="k", width=1, headwidth=5, headlength=10)
ax.annotate(
"",
xy=(ramws[0], decmws[0]),
xytext=(ramws[1], decmws[1]),
arrowprops=arrow_args,
)
def get_expids_efftimes(tileqafits, prod):
"""
Get the EFFTIME and EFFTIMEQA for the EXPIDs from the coadd.
Args:
tileqafits: path to the tile-qa-TILEID-NIGHT.fits file
prod: full path to input reduction, e.g. /global/cfs/cdirs/desi/spectro/redux/daily (string)
Returns:
structured array with the following keys:
EXPID, NIGHT, EFFTIME_SPEC, QA_EFFTIME_SPEC
Notes:
We work from the spectra-*fits files; if not present in the same folder
as tileqafits, we look into the expected path using prod.
As this is run *before* desi_tsnr_afterburner, we compute here the
EFFTIME_SPEC values.
If no GOALTYPE in tileqafits header, we default to dark.
TBD: we purposely do not use TSNR2 keys from qa-params.yaml,
as those do not handle the TSNR2_ELG->TSNR2_LRG change from
2021 shutdown.
We use:
- dark before 20210901: TSNR2_ELG
- dark after 20210901: TSNR2_LRG
- bright: TSNR2_BGS
- backup: TSNR2_BGS
Method assessed against all Main exposures until 20211013 in daily tsnr-exposures.fits.
"""
# AR GOALTYPE (defaulting to dark) + TSNR2 key
goaltype = "dark"
h = fits.open(tileqafits)
hdr = fits.getheader(tileqafits, "FIBERQA")
if "GOALTYPE" in [cards[0] for cards in hdr.cards]:
goaltype = hdr["GOALTYPE"].lower()
if goaltype in ["bright", "backup"]:
tsnr2_key = "<KEY>"
else:
if hdr["LASTNITE"] < 20210921:
tsnr2_key = "TSNR2_ELG"
else:
tsnr2_key = "<KEY>"
# AR get list of exposures used for the tile
# AR first try spectra*fits files in the same folder as tileqafits
tmpstr = os.path.join(
os.path.dirname(tileqafits),
"spectra-*-{}-thru{}.fits".format(hdr["TILEID"], hdr["LASTNITE"]),
)
spectra_fns = sorted(glob(tmpstr))
# AR then try based on prod
if len(spectra_fns) == 0:
tmpstr = os.path.join(
prod,
"tiles",
"cumulative",
"{}".format(hdr["TILEID"]),
"{}".format(hdr["LASTNITE"]),
"spectra-*-{}-thru{}.fits".format(hdr["TILEID"], hdr["LASTNITE"]),
)
spectra_fns = sorted(glob(tmpstr))
if len(spectra_fns) > 0:
fmap = read_fibermap(spectra_fns[0])
expids, ii = np.unique(fmap["EXPID"], return_index=True)
nights = fmap["NIGHT"][ii]
# AR no spectra files found; leave the exposure lists empty
else:
expids, nights = [], []
nexp = len(expids)
# AR looping on EXPIDS
d = Table()
d["EXPID"] = expids
d["NIGHT"] = nights
d["EFFTIME_SPEC"], d["QA_EFFTIME_SPEC"] = np.zeros(nexp), np.zeros(nexp)
for i in range(nexp):
# AR EFFTIME_SPEC, with looping on petals and cameras
tsnr2_petals = np.zeros(10)
for petal in range(10):
for camera in ["b", "r", "z"]:
tsnr2_key_cam = "{}_{}".format(tsnr2_key, camera.upper())
fn = os.path.join(
prod,
"exposures",
"{}".format(nights[i]),
"{:08d}".format(expids[i]),
"cframe-{}{}-{:08d}.fits".format(camera, petal, expids[i]),
)
if os.path.isfile(fn):
vals = fitsio.read(fn, ext="SCORES", columns=[tsnr2_key_cam])[tsnr2_key_cam]
tsnr2_petals[petal] += np.median(vals[vals > 0])
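# AR average the per-petal TSNR2 over petals with data, then convert to an
# AR effective time for the target class encoded in tsnr2_key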
d["EFFTIME_SPEC"][i] = tsnr2_to_efftime(tsnr2_petals[tsnr2_petals > 0].mean(), tsnr2_key.split("_")[-1])
# QA_EFFTIME_SPEC, reading exposure-qa*fits
fn = os.path.join(
prod,
"exposures",
"{}".format(nights[i]),
"{:08d}".format(expids[i]),
"exposure-qa-{:08d}.fits".format(expids[i]),
)
if os.path.isfile(fn):
d["QA_EFFTIME_SPEC"][i] = fits.getheader(fn, "FIBERQA")["EFFTIME"]
return d
def make_tile_qa_plot(
tileqafits,
prod,
pngoutfile=None,
dchi2_min=None,
tsnr2_key=None,
refdir=resource_filename("desispec", "data/qa"),
):
"""
Generate the per-cumulative tile QA png file.
Will replace .fits by .png in tileqafits for the png filename.
Args:
tileqafits: path to the tile-qa-TILEID-NIGHT.fits file
prod: full path to input reduction, e.g. /global/cfs/cdirs/desi/spectro/redux/daily (string)
Options:
pngoutfile: output filename; default to tileqafits .fits -> .png
dchi2_min (optional, defaults to value in qa-params.yaml): minimum DELTACHI2 for a valid zspec (float)
tsnr2_key (optional, defaults to value in qa-params.yaml): TSNR2 key used for plot (string)
refdir (optional, defaults to "desispec","data/qa"): path to folder with reference measurements for the n(z) and the TSNR2 (string)
Note:
If hdr["SURVEY"] is not "main", will not plot the n(z).
If hdr["FAPRGRM"].lower() is not "bright" or "dark", will not plot the TSNR2 plot nor the skymap.
"""
# AR config
config = get_qa_config()
# AR default values
if dchi2_min is None:
dchi2_min = config["tile_qa_plot"]["dchi2_min"]
if tsnr2_key is None:
tsnr2_key = config["tile_qa_plot"]["tsnr2_key"]
# SB derive output file name, handling case if ".fits" appears in path
if pngoutfile is None:
base = os.path.splitext(os.path.basename(tileqafits))[0]
pngoutfile = os.path.join(os.path.dirname(tileqafits), base+'.png')
# AR reading
h = fits.open(tileqafits)
hdr = h["FIBERQA"].header
fiberqa = h["FIBERQA"].data
petalqa = h["PETALQA"].data
if not "SURVEY" in hdr :
print("no SURVEY keyword in header, skip this tile")
return
# AR start plotting
fig = plt.figure(figsize=(20, 15))
gs = gridspec.GridSpec(6, 4, wspace=0.25, hspace=0.2)
# AR exposures from that TILEID
exps = get_expids_efftimes(tileqafits, prod)
xs = (-0.2, 0.1, 0.4, 0.7)
y, dy = 0.95, -0.10
fs = 10
ax = plt.subplot(gs[0, 1])
ax.axis("off")
txts = ["EXPID", "NIGHT", "EFFTIME", "QA_EFFTIME"]
for x, txt in zip(xs, txts):
ax.text(x, y, txt, fontsize=fs, fontweight="bold", transform=ax.transAxes)
y += 2 * dy
for i in range(len(exps)):
txts = [
"{:08d}".format(exps["EXPID"][i]),
"{}".format(exps["NIGHT"][i]),
"{:.0f}s".format(exps["EFFTIME_SPEC"][i]),
"{:.0f}s".format(exps["QA_EFFTIME_SPEC"][i]),
]
for x, txt in zip(xs, txts):
ax.text(x, y, txt, fontsize=fs, transform=ax.transAxes)
y += dy
# AR cutout
ax = plt.subplot(gs[2:4, 1])
plot_cutout(ax, hdr["TILEID"], hdr["TILERA"], hdr["TILEDEC"], 4)
# AR n(z)
# AR n(z): plotting only if main survey
if hdr["SURVEY"] == "main" and hdr["FAPRGRM"].lower() != "backup" :
# AR n(z): reference
ref = Table.read(os.path.join(refdir, "qa-reference-nz.ecsv"))
# AR n(z), for the tracers for that program
tracers = [
tracer
for tracer in list(config["tile_qa_plot"]["tracers"].keys())
if config["tile_qa_plot"]["tracers"][tracer]["program"]
== hdr["FAPRGRM"].upper()
]
cols = plt.rcParams["axes.prop_cycle"].by_key()["color"][: len(tracers)]
# AR number of valid zspec in zmin, zmax
n_valid, nref_valid = 0.0, 0.0
# compare number of qsos from redrock and QuasarNP
nqso_rr = 0
### nqso_qnp = 0
# AR plot
ax = plt.subplot(gs[0:2, 2])
for tracer, col in zip(tracers, cols):
# AR considered tile
bins, zhists = get_zhists(hdr["TILEID"], tracer, dchi2_min, fiberqa)
cens = 0.5 * (bins[1:] + bins[:-1])
ax.plot(cens, zhists, color=col, label=tracer)
# AR number of valid zspec
zmin, zmax = get_tracer_zminmax(tracer)
istracer = get_tracer(tracer, fiberqa)
sel = (bins[:-1] >= zmin) & (bins[1:] <= zmax)
n_valid += zhists[sel].sum() * istracer.sum()
if tracer=="QSO" :
nqso_rr = int(zhists[sel].sum() * istracer.sum())
### nqso_qnp = np.sum((fiberqa['IS_QSO_QN']==1)\
### &(fiberqa['Z_QN']>=zmin)&(fiberqa['Z_QN']<=zmax))
# AR reference
sel = ref["TRACER"] == tracer
ax.fill_between(
cens,
ref["N_MEAN"][sel] - ref["N_MEAN_STD"][sel],
ref["N_MEAN"][sel] + ref["N_MEAN_STD"][sel],
color=col,
alpha=0.3,
label="{} reference".format(tracer),
)
# AR reference number of valid zspec
sel &= (ref["ZMIN"] >= zmin) & (ref["ZMAX"] <= zmax)
nref_valid += ref["N_MEAN"][sel].sum() * istracer.sum()
ax.legend(ncol=2)
ax.set_xlabel("Z")
ax.set_ylabel("Per tile fractional count")
if hdr["FAPRGRM"].lower() == "bright":
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 0.4)
else:
ax.set_xlim(0, 6)
ax.set_ylim(0, 0.2)
ax.grid(True)
# AR n(z) : ratio
ratio_nz = n_valid / nref_valid
# AR n(z): if not main, just put dummy -1
else:
ratio_nz = -1
nqso_rr = -1
### nqso_qnp = -1
# AR Z vs. FIBER plot
ax = plt.subplot(gs[0:2, 3])
xlim, ylim = (-100, 5100), (-1.1, 1.1)
yticks = np.array([0, 0.1, 0.25, 0.5, 1, 2, 3, 4, 5, 6])
# AR identifying non-assigned/sky/broken fibers
# AR (equivalent of OBJTYPE!="TGT" in fiberassign-TILEID.fits.gz)
# AR undirect way, as not all columns are here...
# AR the DESI_TARGET column for sky should be present + correctly set
# AR for all surveys (with same bits); SUPP_SKY will have SKY set too
nontgt = np.zeros(len(fiberqa), dtype=bool)
for msk in ["SKY", "BAD_SKY"]:
nontgt |= (fiberqa["DESI_TARGET"] & desi_mask[msk]) > 0
for msk in ["UNASSIGNED", "STUCKPOSITIONER", "BROKENFIBER"]:
nontgt |= (fiberqa["QAFIBERSTATUS"] & fibermask[msk]) > 0
sels = [
(~nontgt) & (fiberqa["QAFIBERSTATUS"] == 0),
(~nontgt) & (fiberqa["QAFIBERSTATUS"] > 0),
nontgt
]
labels = ["QAFIBERSTATUS = 0", "QAFIBERSTATUS > 0", "non-TGT"]
cs = ["b", "r", "y"]
zorders = [1, 1, 0]
for sel, label, c, zorder in zip(sels, labels, cs, zorders):
ax.scatter(fiberqa["FIBER"][sel], np.log10(0.1 + fiberqa["Z"][sel]), s=0.1, c=c, alpha=1.0, zorder=zorder, label="{} ({} fibers)".format(label, sel.sum()))
for petal in range(10):
if petal % 2 == 0:
ax.axvspan(petal * 500, (petal + 1) * 500, color="k", alpha=0.05, zorder=0)
ax.text(petal * 500 + 250, -1.09, str(petal), color="k", fontsize=10, ha="center")
ax.set_xlabel("FIBER")
ax.set_ylabel("Z")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_yticks(np.log10(0.1 + yticks))
ax.set_yticklabels(yticks.astype(str))
ax.grid(True)
ax.legend(loc=2, markerscale=10, fontsize=7)
show_efftime = True # else show TSNR
if show_efftime :
ax = plt.subplot(gs[2:4, 2])
x = fiberqa["MEAN_FIBER_X"]
y = fiberqa["MEAN_FIBER_Y"]
fibers = fiberqa["FIBER"]
efftime = fiberqa["EFFTIME_SPEC"]
medefftime = np.median(efftime[efftime>0])
vmin = 0.5*medefftime
vmax = 1.5*medefftime
sel = (efftime>0)
sc = ax.scatter(
x[sel],
y[sel],
c=efftime[sel],
cmap=matplotlib.cm.viridis_r,
vmin=vmin,
vmax=vmax,
s=5,
)
sel = ((fiberqa["QAFIBERSTATUS"] & fibermask.mask("LOWEFFTIME")) > 0)&(efftime>0)
ax.scatter(x[sel],y[sel],
edgecolor="r", facecolors="none", s=5, alpha=0.5,
label="LOWEFFTIME")
# plotting fibers discarded | |
output .nc file from replica exchange simulation, (default='output/output.nc')
:type output_data: str
:param output_directory: path to which output files will be written (default='output')
:type output_directory: str
:param series_per_page: number of replica data series to plot per pdf page (default=4)
:type series_per_page: int
:param write_data_file: Option to write a text data file containing the state_energies array (default=True)
:type write_data_file: Boolean
:param plot_production_only: Option to plot only the production region, as determined from pymbar detectEquilibration (default=False)
:type plot_production_only: Boolean
:param equil_nskip: skip this number of frames to sparsify the energy timeseries for pymbar detectEquilibration (default=1) - this is used only when frame_begin=0 and the trajectory has less than 40000 frames.
:type equil_nskip: int
:param frame_begin: analyze starting from this frame, discarding all prior as equilibration period (default=0)
:type frame_begin: int
:param frame_end: analyze up to this frame only, discarding the rest (default=-1).
:type frame_end: int
:returns:
- replica_energies ( `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>`_ ( np.float( [number_replicas,number_simulation_steps] ), simtk.unit ) ) - The potential energies for all replicas at all (printed) time steps
- replica_state_indices ( np.int64( [number_replicas,number_simulation_steps] ), simtk.unit ) - The thermodynamic state assignments for all replicas at all (printed) time steps
- production_start ( int ) - The frame at which the production region begins for all replicas, as determined from pymbar detectEquilibration
- sample_spacing ( int ) - The number of frames between uncorrelated state energies, estimated using a heuristic algorithm
- n_transit ( np.float( [number_replicas] ) ) - Number of half-transitions between state 0 and n for each replica
- mixing_stats ( tuple ( np.float( [number_replicas x number_replicas] ) , np.float( [ number_replicas ] ) , float( statistical inefficiency ) ) ) - transition matrix, corresponding eigenvalues, and statistical inefficiency
"""
t1 = time.perf_counter()
# Read the simulation coordinates for individual temperature replicas
reporter = MultiStateReporter(output_data, open_mode="r")
t2 = time.perf_counter()
if print_timing:
print(f"open data time: {t2-t1}")
# figure out what the time between output is.
# We assume all use the same time step (which i think is required)
mcmove = reporter.read_mcmc_moves()[0]
time_interval = mcmove.n_steps*mcmove.timestep
t3 = time.perf_counter()
if print_timing:
print(f"read_mcmc_moves time: {t3-t2}")
# figure out what the temperature list is
states = reporter.read_thermodynamic_states()[0]
t4 = time.perf_counter()
if print_timing:
print(f"read_thermodynamics_states time: {t4-t3}")
temperature_list = []
for s in states:
temperature_list.append(s.temperature)
analyzer = ReplicaExchangeAnalyzer(reporter)
t5 = time.perf_counter()
(
replica_energies,
unsampled_state_energies,
neighborhoods,
replica_state_indices,
) = analyzer.read_energies()
# Truncate output of read_energies() to last frame of interest
if frame_end > 0:
# Use frames from frame_begin to frame_end
replica_energies = replica_energies[:,:,:frame_end]
unsampled_state_energies = unsampled_state_energies[:,:,:frame_end]
neighborhoods = neighborhoods[:,:,:frame_end]
replica_state_indices = replica_state_indices[:,:frame_end]
t6 = time.perf_counter()
if print_timing:
print(f"read_energies time: {t6-t5}")
n_particles = np.shape(reporter.read_sampler_states(iteration=0)[0].positions)[0]
temps = np.array([temp._value for temp in temperature_list])
beta_k = 1 / (kB * temps)
n_replicas = len(temperature_list)
for k in range(n_replicas):
replica_energies[:, k, :] *= beta_k[k] ** (-1)
t7 = time.perf_counter()
if print_timing:
print(f"reduce replica energies time: {t7-t6}")
total_steps = len(replica_energies[0][0])
state_energies = np.zeros([n_replicas, total_steps])
t8 = time.perf_counter()
# there must be a better way to do this with a list comprehension.
for step in range(total_steps):
for state in range(n_replicas):
state_energies[state, step] = replica_energies[
np.where(replica_state_indices[:, step] == state)[0], 0, step
]
t9 = time.perf_counter()
if print_timing:
print(f"assign state energies time: {t9-t8}")
# can run physical-validation on these state_energies
# Use pymbar timeseries module to detect production period
t10 = time.perf_counter()
# Start of equilibrated data:
t0 = np.zeros((n_replicas))
# Statistical inefficiency:
g = np.zeros((n_replicas))
subsample_indices = {}
# If sufficiently large, discard the first 20000 frames as equilibration period and use
# subsampleCorrelatedData to get the energy decorrelation time.
if total_steps >= 40000 or frame_begin > 0:
if frame_begin > 0:
# If specified, use frame_begin as the start of the production region
production_start=frame_begin
else:
# Otherwise, use frame 20000
production_start=20000
for state in range(n_replicas):
subsample_indices[state] = timeseries.subsampleCorrelatedData(
state_energies[state][production_start:],
conservative=True,
)
g[state] = subsample_indices[state][1]-subsample_indices[state][0]
else:
# For small trajectories, use detectEquilibration
for state in range(n_replicas):
t0[state], g[state], Neff_max = timeseries.detectEquilibration(state_energies[state], nskip=equil_nskip)
# Choose the latest equil timestep to apply to all states
production_start = int(np.max(t0))
# Assume a normal distribution (very rough approximation), and use mean plus
# the number of standard deviations which leads to (n_replica-1)/n_replica coverage
# For 12 replicas this should be the mean + 1.7317 standard deviations
# x standard deviations is the solution to (n_replica-1)/n_replica = erf(x/sqrt(2))
# This is equivalent to a target of 23/24 CDF value
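# e.g. for n_replicas=12: erf(1.7317/sqrt(2)) ~= 11/12 ~= 0.917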
print(f"g: {g.astype(int)}")
def erf_fun(x):
return np.power((erf(x/np.sqrt(2))-(n_replicas-1)/n_replicas),2)
# x must be larger than zero
opt_g_results = minimize_scalar(
erf_fun,
bounds=(0,10)
)
if not opt_g_results.success:
print("Error solving for correlation time, exiting...")
print(f"erf opt results: {opt_g_results}")
exit()
sample_spacing = int(np.ceil(np.mean(g)+opt_g_results.x*np.std(g)))
t11 = time.perf_counter()
if print_timing:
print(f"detect equil and subsampling time: {t11-t10}")
print("state mean energies variance")
for state in range(n_replicas):
state_mean = np.mean(state_energies[state,production_start::sample_spacing])
state_std = np.std(state_energies[state,production_start::sample_spacing])
print(
f" {state:4d} {state_mean:10.6f} {state_std:10.6f}"
)
t12 = time.perf_counter()
if write_data_file == True:
f = open(os.path.join(output_directory, "replica_energies.dat"), "w")
for step in range(total_steps):
f.write(f"{step:10d}")
for replica_index in range(n_replicas):
f.write(f"{replica_energies[replica_index,replica_index,step]:12.6f}")
f.write("\n")
f.close()
t13 = time.perf_counter()
if print_timing:
print(f"Optionally write .dat file: {t13-t12}")
t14 = time.perf_counter()
if plot_production_only==True:
plot_replica_exchange_energies(
state_energies[:,production_start:],
temperature_list,
series_per_page,
time_interval=time_interval,
time_shift=production_start*time_interval,
file_name=f"{output_directory}/rep_ex_ener.pdf",
)
plot_replica_exchange_energy_histograms(
state_energies[:,production_start:],
temperature_list,
file_name=f"{output_directory}/rep_ex_ener_hist.pdf",
)
plot_replica_exchange_summary(
replica_state_indices[:,production_start:],
temperature_list,
series_per_page,
time_interval=time_interval,
time_shift=production_start*time_interval,
file_name=f"{output_directory}/rep_ex_states.pdf",
)
plot_replica_state_matrix(
replica_state_indices[:,production_start:],
file_name=f"{output_directory}/state_probability_matrix.pdf",
)
else:
plot_replica_exchange_energies(
state_energies,
temperature_list,
series_per_page,
time_interval=time_interval,
file_name=f"{output_directory}/rep_ex_ener.pdf",
)
plot_replica_exchange_energy_histograms(
state_energies,
temperature_list,
file_name=f"{output_directory}/rep_ex_ener_hist.pdf",
)
plot_replica_exchange_summary(
replica_state_indices,
temperature_list,
series_per_page,
time_interval=time_interval,
file_name=f"{output_directory}/rep_ex_states.pdf",
)
plot_replica_state_matrix(
replica_state_indices,
file_name=f"{output_directory}/state_probability_matrix.pdf",
)
t15 = time.perf_counter()
if print_timing:
print(f"plotting time: {t15-t14}")
# Analyze replica exchange state transitions
# For each replica, how many times does the thermodynamic state go between state 0 and state n
# For consistency with the other mixing statistics, use only the production region here
replica_state_indices_prod = replica_state_indices[:,production_start:]
# Number of one-way transitions from states 0 to n or states n to 0
n_transit = np.zeros((n_replicas,1))
# Replica_state_indices is [n_replicas x n_iterations]
for rep in range(n_replicas):
last_bound = None
for i in range(replica_state_indices_prod.shape[1]):
if replica_state_indices_prod[rep,i] == 0 or replica_state_indices_prod[rep,i] == (n_replicas-1):
if last_bound is None:
# This is the first time state 0 or n is visited
pass
else:
if last_bound != replica_state_indices_prod[rep,i]:
# This is a completed transition from 0 to n or n to 0
n_transit[rep] += 1
last_bound = replica_state_indices_prod[rep,i]
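# e.g. a replica walking 0 -> ... -> (n-1) -> ... -> 0 contributes two
# half-transitions: one on reaching state n-1 and one on returning to 0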
t16 = time.perf_counter()
if print_timing:
print(f"replica transition analysis: {t16-t15}")
# Compute transition matrix from the analyzer
mixing_stats = analyzer.generate_mixing_statistics(number_equilibrated=production_start)
t17 = time.perf_counter()
if print_timing:
print(f"compute transition matrix: {t17-t16}")
print(f"total time elapsed: {t17-t1}")
return (replica_energies, replica_state_indices, production_start, sample_spacing, n_transit, mixing_stats)
def run_replica_exchange(
topology,
system,
positions,
total_simulation_time=1.0 * unit.picosecond,
simulation_time_step=None,
temperature_list=None,
friction=1.0 / unit.picosecond,
minimize=True,
exchange_frequency=1000,
output_data="output/output.nc",
):
"""
Run a OpenMMTools replica exchange simulation using an OpenMM coarse grained model.
:param topology: OpenMM Topology
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param system: OpenMM System()
:type system: `System() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1openmm_1_1System.html>`_
:param positions: Positions array for the model we would like to test
:type positions: `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>`_ ( np.array( [cgmodel.num_beads,3] ), simtk.unit )
:param total_simulation_time: Total run time for individual simulations
:type total_simulation_time: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param simulation_time_step: Simulation integration time step
:type simulation_time_step: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param temperature_list: List of temperatures for which to perform replica exchange simulations, default = None
    :type temperature_list: List( float * simtk.unit.temperature )
:param friction: Langevin thermostat friction coefficient, default = 1 / ps
:type friction: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param minimize: Whether minimization is done before running the simulation
:type minimize: bool
    :param output_data: Name of the NetCDF file where we will write simulation data
    :type output_data: str
    :param exchange_frequency: Number of time steps between replica exchange attempts, default = 1000
    :type exchange_frequency: int
the name and the display name of the metric, i.e. it is a localizable string.
:type name: ~$(python-base-namespace).v2016_03_01.models.LocalizableString
:param unit: the unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds", "Unspecified",
"Cores", "MilliCores", "NanoCores", "BitsPerSecond".
:type unit: str or ~$(python-base-namespace).v2016_03_01.models.Unit
:param primary_aggregation_type: the primary aggregation type value defining how to use the
values for display. Possible values include: "None", "Average", "Count", "Minimum", "Maximum",
"Total".
:type primary_aggregation_type: str or
~$(python-base-namespace).v2016_03_01.models.AggregationType
:param metric_availabilities: the collection of what aggregation intervals are available to be
queried.
:type metric_availabilities:
list[~$(python-base-namespace).v2016_03_01.models.MetricAvailability]
:param id: the resource identifier of the metric definition.
:type id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'LocalizableString'},
'unit': {'key': 'unit', 'type': 'str'},
'primary_aggregation_type': {'key': 'primaryAggregationType', 'type': 'str'},
'metric_availabilities': {'key': 'metricAvailabilities', 'type': '[MetricAvailability]'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MetricDefinition, self).__init__(**kwargs)
self.resource_id = kwargs.get('resource_id', None)
self.name = kwargs.get('name', None)
self.unit = kwargs.get('unit', None)
self.primary_aggregation_type = kwargs.get('primary_aggregation_type', None)
self.metric_availabilities = kwargs.get('metric_availabilities', None)
self.id = kwargs.get('id', None)
class MetricDefinitionCollection(msrest.serialization.Model):
"""Represents collection of metric definitions.
All required parameters must be populated in order to send to Azure.
:param value: Required. the values for the metric definitions.
:type value: list[~$(python-base-namespace).v2016_03_01.models.MetricDefinition]
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MetricDefinition]'},
}
def __init__(
self,
**kwargs
):
super(MetricDefinitionCollection, self).__init__(**kwargs)
self.value = kwargs['value']
class RetentionPolicy(msrest.serialization.Model):
"""Specifies the retention policy for the log.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. a value indicating whether the retention policy is enabled.
:type enabled: bool
:param days: Required. the number of days for the retention in days. A value of 0 will retain
the events indefinitely.
:type days: int
"""
_validation = {
'enabled': {'required': True},
'days': {'required': True, 'minimum': 0},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'days': {'key': 'days', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RetentionPolicy, self).__init__(**kwargs)
self.enabled = kwargs['enabled']
self.days = kwargs['days']
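# A hedged usage sketch (not generated code): the constructor reads both
# required kwargs directly, so omitting 'enabled' or 'days' raises a
# KeyError; the _validation map ('minimum': 0) is enforced on serialization.
#
#   policy = RetentionPolicy(enabled=True, days=30)   # keep events for 30 days
#   forever = RetentionPolicy(enabled=True, days=0)   # days=0 retains indefinitely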
class RuleAction(msrest.serialization.Model):
"""The action that is performed when the alert rule becomes active, and when an alert condition is resolved.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RuleEmailAction, RuleWebhookAction.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. specifies the type of the action. There are two types of actions:
    RuleEmailAction and RuleWebhookAction. Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': 'odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'Microsoft.Azure.Management.Insights.Models.RuleEmailAction': 'RuleEmailAction', 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction': 'RuleWebhookAction'}
}
def __init__(
self,
**kwargs
):
super(RuleAction, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class RuleDataSource(msrest.serialization.Model):
"""The resource from which the rule collects its data.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RuleManagementEventDataSource, RuleMetricDataSource.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. specifies the type of data source. There are two types of rule
    data sources: RuleMetricDataSource and RuleManagementEventDataSource. Constant filled by server.
:type odata_type: str
:param resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ :
this property cannot be updated for an existing rule.
:type resource_uri: str
:param legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:type legacy_resource_id: str
:param resource_location: the location of the resource.
:type resource_location: str
:param metric_namespace: the namespace of the metric.
:type metric_namespace: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': 'odata\\.type', 'type': 'str'},
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'legacy_resource_id': {'key': 'legacyResourceId', 'type': 'str'},
'resource_location': {'key': 'resourceLocation', 'type': 'str'},
'metric_namespace': {'key': 'metricNamespace', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource': 'RuleManagementEventDataSource', 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource': 'RuleMetricDataSource'}
}
def __init__(
self,
**kwargs
):
super(RuleDataSource, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.resource_uri = kwargs.get('resource_uri', None)
self.legacy_resource_id = kwargs.get('legacy_resource_id', None)
self.resource_location = kwargs.get('resource_location', None)
self.metric_namespace = kwargs.get('metric_namespace', None)
class RuleEmailAction(RuleAction):
"""Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. specifies the type of the action. There are two types of actions:
    RuleEmailAction and RuleWebhookAction. Constant filled by server.
:type odata_type: str
:param send_to_service_owners: Whether the administrators (service and co-administrators) of
the service should be notified when the alert is activated.
:type send_to_service_owners: bool
:param custom_emails: the list of administrator's custom email addresses to notify of the
activation of the alert.
:type custom_emails: list[str]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': 'odata\\.type', 'type': 'str'},
'send_to_service_owners': {'key': 'sendToServiceOwners', 'type': 'bool'},
'custom_emails': {'key': 'customEmails', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(RuleEmailAction, self).__init__(**kwargs)
self.odata_type = 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction' # type: str
self.send_to_service_owners = kwargs.get('send_to_service_owners', None)
self.custom_emails = kwargs.get('custom_emails', None)
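# A hedged sketch of how the _subtype_map above is used: msrest's Deserializer
# dispatches on the 'odata.type' discriminator, so deserializing to the base
# RuleAction yields the concrete subclass. The payload is illustrative, not a
# real service response.
#
#   from msrest import Deserializer
#   client_models = {'RuleAction': RuleAction, 'RuleEmailAction': RuleEmailAction}
#   deserializer = Deserializer(client_models)
#   payload = {
#       'odata.type': 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction',
#       'sendToServiceOwners': True,
#       'customEmails': ['ops@example.com'],
#   }
#   action = deserializer('RuleAction', payload)
#   assert isinstance(action, RuleEmailAction)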
class RuleManagementEventClaimsDataSource(msrest.serialization.Model):
"""The claims for a rule management event data source.
:param email_address: the email address.
:type email_address: str
"""
_attribute_map = {
'email_address': {'key': 'emailAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RuleManagementEventClaimsDataSource, self).__init__(**kwargs)
self.email_address = kwargs.get('email_address', None)
class RuleManagementEventDataSource(RuleDataSource):
"""A rule management event data source. The discriminator fields is always RuleManagementEventDataSource in this case.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. specifies the type of data source. There are two types of rule
    data sources: RuleMetricDataSource and RuleManagementEventDataSource. Constant filled by server.
:type odata_type: str
:param resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ :
this property cannot be updated for an existing rule.
:type resource_uri: str
:param legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
**NOTE**\ : this property cannot be updated for an existing rule.
:type legacy_resource_id: str
:param resource_location: the location of the resource.
:type resource_location: str
:param metric_namespace: the namespace of the metric.
:type metric_namespace: str
:param event_name: the event name.
:type event_name: str
:param event_source: the event source.
:type event_source: str
:param level: the level.
:type level: str
:param operation_name: The name of the operation that should be checked for. If no name is
provided, any operation will match.
:type operation_name: str
:param resource_group_name: the resource group name.
:type resource_group_name: str
:param resource_provider_name: the resource provider name.
:type resource_provider_name: str
:param status: The status of the operation that should be checked for. If no status is
provided, any status will match.
:type status: str
:param sub_status: the substatus.
:type sub_status: str
:param claims: the claims.
:type claims: ~$(python-base-namespace).v2016_03_01.models.RuleManagementEventClaimsDataSource
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': 'odata\\.type', 'type': 'str'},
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'legacy_resource_id': {'key': 'legacyResourceId', 'type': 'str'},
'resource_location': {'key': 'resourceLocation', 'type': 'str'},
'metric_namespace': {'key': 'metricNamespace', 'type': 'str'},
'event_name': {'key': 'eventName', 'type': 'str'},
'event_source': {'key': 'eventSource', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'operation_name': {'key': 'operationName', 'type': 'str'},
'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
'resource_provider_name': {'key': 'resourceProviderName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'sub_status': {'key': 'subStatus', 'type': 'str'},
'claims': {'key': 'claims', 'type': 'RuleManagementEventClaimsDataSource'},
}
def __init__(
self,
**kwargs
):
super(RuleManagementEventDataSource, self).__init__(**kwargs)
self.odata_type = 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource' # type: str
self.event_name = kwargs.get('event_name', None)
self.event_source = kwargs.get('event_source', None)
self.level = kwargs.get('level', None)
self.operation_name = kwargs.get('operation_name', None)
self.resource_group_name = kwargs.get('resource_group_name', None)
self.resource_provider_name = kwargs.get('resource_provider_name', None)
self.status = kwargs.get('status', None)
self.sub_status = kwargs.get('sub_status', None)
self.claims = kwargs.get('claims', None)
class RuleMetricDataSource(RuleDataSource):
"""A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. specifies the type of data source. There are two types of rule
    data sources: RuleMetricDataSource and RuleManagementEventDataSource. Constant filled by server.
:type odata_type: str
:param resource_uri: the resource identifier of the resource the rule monitors. **NOTE**\ :
this property cannot be updated for an existing rule.
:type resource_uri: str
    :param legacy_resource_id: the legacy resource identifier of the resource the rule monitors.
# remove articles from library
for article in articles:
self._library.delete(article)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
def _on_articles_trash(self, evt=None):
"""Marks selected articles as deleted."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# update library
self._library.trash(articles, True)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
def _on_articles_restore(self, evt=None):
"""Marks selected articles as not deleted."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# update library
self._library.trash(articles, False)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
def _on_articles_rating(self, evt=None, rating=None):
"""Sets new rating to selected articles."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# get rating from event
if evt is not None:
evt_id = evt.GetId()
if evt_id == ID_ARTICLES_RATING_0:
rating = 0
elif evt_id == ID_ARTICLES_RATING_1:
rating = 1
elif evt_id == ID_ARTICLES_RATING_2:
rating = 2
elif evt_id == ID_ARTICLES_RATING_3:
rating = 3
elif evt_id == ID_ARTICLES_RATING_4:
rating = 4
elif evt_id == ID_ARTICLES_RATING_5:
rating = 5
else:
return
# check rating
if rating is None:
return
# set rating and update library
for article in articles:
article.rating = rating
self._library.update(article)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
def _on_articles_colour(self, evt=None, colour=None):
"""Sets new colour to selected articles."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# get colour from event
if evt is not None:
evt_id = evt.GetId()
if evt_id == ID_ARTICLES_COLOUR_GRAY:
colour = mwx.COLOUR_BULLET_GRAY
elif evt_id == ID_ARTICLES_COLOUR_RED:
colour = mwx.COLOUR_BULLET_RED
elif evt_id == ID_ARTICLES_COLOUR_ORANGE:
colour = mwx.COLOUR_BULLET_ORANGE
elif evt_id == ID_ARTICLES_COLOUR_YELLOW:
colour = mwx.COLOUR_BULLET_YELLOW
elif evt_id == ID_ARTICLES_COLOUR_GREEN:
colour = mwx.COLOUR_BULLET_GREEN
elif evt_id == ID_ARTICLES_COLOUR_BLUE:
colour = mwx.COLOUR_BULLET_BLUE
elif evt_id == ID_ARTICLES_COLOUR_PURPLE:
colour = mwx.COLOUR_BULLET_PURPLE
else:
return
# remove gray
if colour == mwx.COLOUR_BULLET_GRAY:
colour = None
# set colour and update library
for article in articles:
article.colour = mwx.rgb_to_hex(colour) if colour else None
self._library.update(article)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
def _on_articles_labels(self, evt=None):
"""Sets new labels to selected articles."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# get available labels
labels = self._library.search(core.Query("", core.Label.NAME))
# set labels
dlg = LabelsView(self, articles, labels)
response = dlg.ShowModal()
dlg.Destroy()
# check response
if response != wx.ID_OK:
return
# update library
for article in articles:
self._library.update(article)
# refresh collections view
self._collections_view.UpdateLabelsCollections()
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
def _on_articles_match(self, evt=None):
"""Finds and updates article by on-line match."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# select master article
article = articles[0]
# raise repository search dialog
dlg = RepositoryView(self, self._library, article=article)
response = dlg.ShowModal()
matches = dlg.GetSelectedArticles()
dlg.Destroy()
# check response
if response != wx.ID_OK or not matches:
return
# get match
match = matches[0]
# update article attributes
if match.doi:
article.doi = match.doi
if match.pmid:
article.pmid = match.pmid
if match.year:
article.year = match.year
if match.volume:
article.volume = match.volume
if match.issue:
article.issue = match.issue
if match.pages:
article.pages = match.pages
if match.title:
article.title = match.title
if match.abstract:
article.abstract = match.abstract
if match.journal:
article.journal = match.journal
if match.authors:
article.authors = match.authors
# update library
self._library.update(article)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
self._articles_view.SetSelectedArticles([article])
def _on_articles_update(self, evt=None):
"""Updates articles by on-line match."""
# get selected articles with PubMed ID
articles = self._articles_view.GetSelectedArticles()
articles = [a for a in articles if a.pmid is not None]
if not articles:
return
# update articles by PubMed
self._articles_update_async(articles)
# refresh collections view
self._collections_view.UpdateLabelsCollections()
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
def _on_articles_attach_pdf(self, evt=None):
"""Attaches PDF to selected article."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# select master article
article = articles[0]
# raise open dialog
wildcard = "Adobe PDF File (*.pdf)|*.pdf"
dlg = wx.FileDialog(self, "Attach PDF", "", "", wildcard=wildcard, style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
dlg.Destroy()
else:
dlg.Destroy()
return
# set PDF to article
article.pdf = True
# copy PDF into library folder
shutil.copy(path, article.pdf_path)
# update library
self._library.update(article)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
self._articles_view.SetSelectedArticles([article])
def _on_articles_to_collection(self, evt):
"""Adds or removes articles to/from manual collection."""
# get selected articles
articles = self._articles_view.GetSelectedArticles()
if not articles:
return
# set direction
insert = not evt.collection_status
# create collection
collection = core.Collection(dbid=evt.collection_dbid)
# set articles collection
self._library.collect(articles, collection, insert)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
def _on_articles_dropped_to_trash(self, evt):
"""Removes articles dropped to trash collection."""
# get articles
articles = [core.Article(dbid=i) for i in evt.articles_dbids]
# update library
self._library.trash(articles, True)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
def _on_articles_dropped_to_collection(self, evt):
"""Adds articles to dropped manual collection."""
# get articles
articles = [core.Article(dbid=i) for i in evt.articles_dbids]
# create collection
collection = core.Collection(dbid=evt.collection_dbid)
# set articles collection
self._library.collect(articles, collection, True)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
    def _on_articles_dropped_to_label(self, evt):
        """Adds the label to dropped articles."""
# get articles
articles = [core.Article(dbid=i) for i in evt.articles_dbids]
# create label
label = core.Label(title=evt.label_title)
# set articles label
self._library.label(articles, label, True)
# refresh collections view
self._collections_view.UpdateCounts()
# refresh articles view
self._articles_view.ShowArticles()
# re-select articles
self._articles_view.SetSelectedArticles(articles)
def _on_details_navigating(self, evt):
"""Handles details navigating event."""
# get URL
url = evt.url
# parse URL
match = DETAILS_URL_PATTERN.search(url)
if not match:
return
# get match
parameter = match.group('parameter')
value = match.group('value').replace("%20", " ")
# check value
if not value:
return
# show article by DOI
if parameter == 'doi':
link = "https://dx.doi.org/%s" % value
            try: webbrowser.open(link, autoraise=1)
            except Exception: pass
# show article by PMID (in PubMed)
elif parameter == 'pmid':
link = "https://ncbi.nlm.nih.gov/pubmed/%s" % value
            try: webbrowser.open(link, autoraise=1)
            except Exception: pass
# search by author (in PubMed)
elif parameter == 'author':
query = "%s[AU]" % value
self._search_repository(query)
# search by journal (in PubMed)
elif parameter == 'journal':
query = "%s[JT]" % value
self._search_repository(query)
# show articles by author (in library)
elif parameter == 'authorid':
query = "%s[AUID]" % value
self._articles_view.SetMasterQuery(None)
self._articles_view.SetQuery(query)
self._articles_view.ShowArticles()
# show articles by label (in library)
elif parameter == 'labelid':
query = "%s[LABELID]" % value
self._articles_view.SetMasterQuery(None)
self._articles_view.SetQuery(query)
self._articles_view.ShowArticles()
# show articles by collection (in library)
elif parameter == 'collectionid':
query = "%s[COLLECTIONID]" % value
self._articles_view.SetMasterQuery(None)
self._articles_view.SetQuery(query)
self._articles_view.ShowArticles()
# set article rating
elif parameter == 'rating':
if value in "012345":
self._on_articles_rating(rating=int(value))
# set article colour
elif parameter == 'colour':
colour = mwx.COLOUR_BULLETS.get(value, None)
if colour is not None:
self._on_articles_colour(colour=colour)
# reveal PDF file
elif parameter == 'pdf':
path = os.path.join(self._library.library_path, value+".pdf")
self._on_articles_reveal_pdf(path=path)
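    # A hedged note: DETAILS_URL_PATTERN is defined elsewhere in this module;
    # the handler above only requires it to expose 'parameter' and 'value'
    # named groups. A pattern of roughly this shape would satisfy it (an
    # illustration, not the actual definition):
    #
    #   DETAILS_URL_PATTERN = re.compile(r"\?(?P<parameter>\w+)=(?P<value>[^&]*)")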
def _on_repository_search(self, evt):
"""Searches on-line repository and imports selected articles."""
# init query
query = getattr(evt, "query", "")
# get selected articles
articles = self._articles_view.GetSelectedArticles()
# make requested query from first article
if articles:
article = articles[0]
if evt.GetId() == ID_REPOSITORY_RECENT_FIRST_AUTHOR and article.authors:
query = "%s[AU]" % article.authors[0].shortname
elif evt.GetId() == ID_REPOSITORY_RECENT_LAST_AUTHOR and article.authors:
query = "%s[AU]" % article.authors[-1].shortname
elif evt.GetId() == ID_REPOSITORY_RECENT_JOURNAL and article.journal:
query = "%s[JT]" % article.journal.abbreviation
# search repository
self._search_repository(query)
def _on_authors_list(self, evt):
"""Shows dialog to manage authors."""
        # raise authors dialog
the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_secret(body, namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param V1DeleteOptions body: (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Secret (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: UnversionedStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'namespace', 'name', 'pretty']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_secret" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_secret`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_secret`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_secret`")
resource_path = '/api/v1/namespaces/{namespace}/secrets/{name}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = {}
files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='UnversionedStatus',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_namespaced_secret(self, body, namespace, name, **kwargs):
"""
partially update the specified Secret
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_secret(body, namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UnversionedPatch body: (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Secret (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Secret
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'namespace', 'name', 'pretty']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_secret" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_secret`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_secret`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_secret`")
resource_path = '/api/v1/namespaces/{namespace}/secrets/{name}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = {}
files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='V1Secret',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
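    # A hedged usage sketch (secret name and label are illustrative): with the
    # Content-Type negotiation above, a JSON-patch style body is a list of
    # operations.
    #
    # >>> body = [{"op": "replace", "path": "/metadata/labels/env", "value": "prod"}]
    # >>> secret = api.patch_namespaced_secret(body, "default", "db-credentials")
    # >>> print(secret.metadata.labels)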
def list_namespaced_service_account(self, namespace, **kwargs):
"""
list or watch objects of kind ServiceAccount
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_namespaced_service_account(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: V1ServiceAccountList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_service_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_service_account`")
resource_path = '/api/v1/namespaces/{namespace}/serviceaccounts'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='V1ServiceAccountList',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
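    # A hedged usage sketch (namespace and selector are illustrative):
    # label_selector and field_selector are forwarded as the
    # labelSelector/fieldSelector query parameters built above.
    #
    # >>> sa_list = api.list_namespaced_service_account(
    # ...     "kube-system", label_selector="app=controller")
    # >>> for sa in sa_list.items:
    # ...     print(sa.metadata.name)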
def create_namespaced_service_account(self, body, namespace, **kwargs):
"""
create a ServiceAccount
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_namespaced_service_account(body, namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param V1ServiceAccount body: (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1ServiceAccount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'namespace', 'pretty']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_service_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_service_account`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_service_account`")
resource_path = '/api/v1/namespaces/{namespace}/serviceaccounts'.replace('{format}', 'json')
method = 'POST'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = {}
files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
                                            response_type='V1ServiceAccount',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
import copy
import numpy as np
from scipy import ndimage
import gnomonic_projection as gp
import spherical_coordinates as sc
import polygon
from logger import Logger
log = Logger(__name__)
log.logger.propagate = False
"""
Implement the icosahedron projection and stitching with the gnomonic projection (forward and reverse).
Reference:
[1]: https://mathworld.wolfram.com/GnomonicProjection.html
"""
def get_icosahedron_parameters(triangle_index, padding_size=0.0):
"""
    Get the parameters of the icosahedron's tangent face.
    Get the tangent point theta and phi, known as theta_0 and phi_0.
    The ERP image origin is the top-left corner.
    :param triangle_index: index of the icosahedron face, in the range [0, 19].
    :param padding_size: padding size of the face, in tangent-plane (gnomonic) units.
    :return: the tangent face's tangent point and its 3 vertices' locations.
"""
# reference: https://en.wikipedia.org/wiki/Regular_icosahedron
radius_circumscribed = np.sin(2 * np.pi / 5.0)
radius_inscribed = np.sqrt(3) / 12.0 * (3 + np.sqrt(5))
radius_midradius = np.cos(np.pi / 5.0)
# the tangent point
theta_0 = None
phi_0 = None
# the 3 points of tangent triangle in spherical coordinate
triangle_point_00_theta = None
triangle_point_00_phi = None
triangle_point_01_theta = None
triangle_point_01_phi = None
triangle_point_02_theta = None
triangle_point_02_phi = None
# triangles' row/col range in the erp image
# erp_image_row_start = None
# erp_image_row_stop = None
# erp_image_col_start = None
# erp_image_col_stop = None
theta_step = 2.0 * np.pi / 5.0
# 1) the up 5 triangles
if 0 <= triangle_index <= 4:
# tangent point of inscribed spheric
theta_0 = - np.pi + theta_step / 2.0 + triangle_index * theta_step
phi_0 = np.pi / 2 - np.arccos(radius_inscribed / radius_circumscribed)
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = -np.pi + triangle_index * theta_step
triangle_point_00_phi = np.arctan(0.5)
triangle_point_01_theta = -np.pi + np.pi * 2.0 / 5.0 / 2.0 + triangle_index * theta_step
triangle_point_01_phi = np.pi / 2.0
triangle_point_02_theta = -np.pi + (triangle_index + 1) * theta_step
triangle_point_02_phi = np.arctan(0.5)
# # availied area of ERP image
# erp_image_row_start = 0
# erp_image_row_stop = (np.pi / 2 - np.arctan(0.5)) / np.pi
# erp_image_col_start = 1.0 / 5.0 * triangle_index_temp
# erp_image_col_stop = 1.0 / 5.0 * (triangle_index_temp + 1)
# 2) the middle 10 triangles
# 2-0) middle-up triangles
if 5 <= triangle_index <= 9:
triangle_index_temp = triangle_index - 5
# tangent point of inscribed spheric
theta_0 = - np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
phi_0 = np.pi / 2.0 - np.arccos(radius_inscribed / radius_circumscribed) - 2 * np.arccos(radius_inscribed / radius_midradius)
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = -np.pi + triangle_index_temp * theta_step
triangle_point_00_phi = np.arctan(0.5)
triangle_point_01_theta = -np.pi + (triangle_index_temp + 1) * theta_step
triangle_point_01_phi = np.arctan(0.5)
triangle_point_02_theta = -np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
triangle_point_02_phi = -np.arctan(0.5)
# # availied area of ERP image
# erp_image_row_start = (np.arccos(radius_inscribed / radius_circumscribed) + np.arccos(radius_inscribed / radius_midradius)) / np.pi
# erp_image_row_stop = (np.pi / 2.0 + np.arctan(0.5)) / np.pi
# erp_image_col_start = 1 / 5.0 * triangle_index_temp
# erp_image_col_stop = 1 / 5.0 * (triangle_index_temp + 1)
# 2-1) the middle-down triangles
if 10 <= triangle_index <= 14:
triangle_index_temp = triangle_index - 10
# tangent point of inscribed spheric
theta_0 = - np.pi + triangle_index_temp * theta_step
phi_0 = -(np.pi / 2.0 - np.arccos(radius_inscribed / radius_circumscribed) - 2 * np.arccos(radius_inscribed / radius_midradius))
# the tangent triangle points coordinate in tangent image
triangle_point_00_phi = -np.arctan(0.5)
triangle_point_00_theta = - np.pi - theta_step / 2.0 + triangle_index_temp * theta_step
if triangle_index_temp == 10:
# cross the ERP image boundary
triangle_point_00_theta = triangle_point_00_theta + 2 * np.pi
triangle_point_01_theta = -np.pi + triangle_index_temp * theta_step
triangle_point_01_phi = np.arctan(0.5)
triangle_point_02_theta = - np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
triangle_point_02_phi = -np.arctan(0.5)
# # availied area of ERP image
# erp_image_row_start = (np.pi / 2.0 - np.arctan(0.5)) / np.pi
# erp_image_row_stop = (np.pi - np.arccos(radius_inscribed / radius_circumscribed) - np.arccos(radius_inscribed / radius_midradius)) / np.pi
# erp_image_col_start = 1.0 / 5.0 * triangle_index_temp - 1.0 / 5.0 / 2.0
# erp_image_col_stop = 1.0 / 5.0 * triangle_index_temp + 1.0 / 5.0 / 2.0
# 3) the down 5 triangles
if 15 <= triangle_index <= 19:
triangle_index_temp = triangle_index - 15
# tangent point of inscribed spheric
theta_0 = - np.pi + triangle_index_temp * theta_step
phi_0 = - (np.pi / 2 - np.arccos(radius_inscribed / radius_circumscribed))
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = - np.pi - theta_step / 2.0 + triangle_index_temp * theta_step
triangle_point_00_phi = -np.arctan(0.5)
triangle_point_01_theta = - np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
# cross the ERP image boundary
if triangle_index_temp == 15:
triangle_point_01_theta = triangle_point_01_theta + 2 * np.pi
triangle_point_01_phi = -np.arctan(0.5)
triangle_point_02_theta = - np.pi + triangle_index_temp * theta_step
triangle_point_02_phi = -np.pi / 2.0
# # spherical coordinate (0,0) is in the center of ERP image
# erp_image_row_start = (np.pi / 2.0 + np.arctan(0.5)) / np.pi
# erp_image_row_stop = 1.0
# erp_image_col_start = 1.0 / 5.0 * triangle_index_temp - 1.0 / 5.0 / 2.0
# erp_image_col_stop = 1.0 / 5.0 * triangle_index_temp + 1.0 / 5.0 / 2.0
tangent_point = [theta_0, phi_0]
# the 3 vertices in tangent image's gnomonic coordinate
triangle_points_tangent = []
triangle_points_tangent.append(gp.gnomonic_projection(triangle_point_00_theta, triangle_point_00_phi, theta_0, phi_0))
triangle_points_tangent.append(gp.gnomonic_projection(triangle_point_01_theta, triangle_point_01_phi, theta_0, phi_0))
triangle_points_tangent.append(gp.gnomonic_projection(triangle_point_02_theta, triangle_point_02_phi, theta_0, phi_0))
    # pad the tangent image
triangle_points_tangent_no_pading = copy.deepcopy(triangle_points_tangent) # Needed for NN blending
triangle_points_tangent_pading = polygon.enlarge_polygon(triangle_points_tangent, padding_size)
# if padding_size != 0.0:
triangle_points_tangent = copy.deepcopy(triangle_points_tangent_pading)
# the points in spherical location
triangle_points_sph = []
for index in range(3):
tri_pading_x, tri_pading_y = triangle_points_tangent_pading[index]
triangle_point_theta, triangle_point_phi = gp.reverse_gnomonic_projection(tri_pading_x, tri_pading_y, theta_0, phi_0)
triangle_points_sph.append([triangle_point_theta, triangle_point_phi])
# compute bounding box of the face in spherical coordinate
    availied_sph_area = np.array(copy.deepcopy(triangle_points_sph))
triangle_points_tangent_pading = np.array(triangle_points_tangent_pading)
point_insert_x = np.sort(triangle_points_tangent_pading[:, 0])[1]
point_insert_y = np.sort(triangle_points_tangent_pading[:, 1])[1]
availied_sph_area = np.append(availied_sph_area, [gp.reverse_gnomonic_projection(point_insert_x, point_insert_y, theta_0, phi_0)], axis=0)
# the bounding box of the face with spherical coordinate
    availied_ERP_area_sph = []  # [min_longitude, max_longitude, max_latitude, min_latitude]
if 0 <= triangle_index <= 4:
if padding_size > 0.0:
availied_ERP_area_sph.append(-np.pi)
availied_ERP_area_sph.append(np.pi)
else:
availied_ERP_area_sph.append(np.amin(availied_sph_area[:, 0]))
availied_ERP_area_sph.append(np.amax(availied_sph_area[:, 0]))
availied_ERP_area_sph.append(np.pi / 2.0)
availied_ERP_area_sph.append(np.amin(availied_sph_area[:, 1])) # the ERP Y axis direction as down
elif 15 <= triangle_index <= 19:
if padding_size > 0.0:
availied_ERP_area_sph.append(-np.pi)
availied_ERP_area_sph.append(np.pi)
else:
availied_ERP_area_sph.append(np.amin(availied_sph_area[:, 0]))
availied_ERP_area_sph.append(np.amax(availied_sph_area[:, 0]))
availied_ERP_area_sph.append(np.amax(availied_sph_area[:, 1]))
availied_ERP_area_sph.append(-np.pi / 2.0)
else:
availied_ERP_area_sph.append(np.amin(availied_sph_area[:, 0]))
availied_ERP_area_sph.append(np.amax(availied_sph_area[:, 0]))
availied_ERP_area_sph.append(np.amax(availied_sph_area[:, 1]))
availied_ERP_area_sph.append(np.amin(availied_sph_area[:, 1]))
# else:
# triangle_points_sph.append([triangle_point_00_theta, triangle_point_00_theta])
# triangle_points_sph.append([triangle_point_01_theta, triangle_point_01_theta])
# triangle_points_sph.append([triangle_point_02_theta, triangle_point_02_theta])
# availied_ERP_area.append(erp_image_row_start)
# availied_ERP_area.append(erp_image_row_stop)
# availied_ERP_area.append(erp_image_col_start)
# availied_ERP_area.append(erp_image_col_stop)
return {"tangent_point": tangent_point, "triangle_points_tangent": triangle_points_tangent,
"triangle_points_sph": triangle_points_sph,
"triangle_points_tangent_nopad": triangle_points_tangent_no_pading, "availied_ERP_area": availied_ERP_area_sph}
def erp2ico_image(erp_image, tangent_image_width, padding_size=0.0, full_face_image=False):
"""Project the equirectangular image to 20 triangle images.
Project the equirectangular image to level-0 icosahedron.
    :param erp_image: the input equirectangular image; an RGB image's shape should be [H,W,3], and a depth map's shape should be [H,W].
:type erp_image: numpy array, [height, width, 3]
:param tangent_image_width: the output triangle image size, defaults to 480
:type tangent_image_width: int, optional
:param padding_size: the output face image' padding size
:type padding_size: float
    :param full_face_image: if True, project all pixels in the face image; if False, project only the pixels inside the face triangle, defaults to False
:type full_face_image: bool, optional
    Note: depth-map input is detected from the array shape (a single-channel image is treated as a depth map); in that case each pixel's 3D point location in the current camera coordinate system is also computed.
    :return: If erp_image is an RGB image:
        1) a list containing 20 triangle images; each image has 4 channels, and an invalid pixel's alpha is 0 while a valid pixel's alpha is 1.
        2)
        3) None.
        If erp_image is a depth map:
        1) a list containing the 20 triangle images' depth maps in the tangent coordinate system; each sub-image's value is the corresponding 3D point's depth.
        2)
        3) the 3D point cloud in the tangent coordinate system, which is the same as the world coordinate system: +y down, +x right and +z forward.
:rtype:
"""
    if full_face_image:
        log.debug("Generating rectangular tangent images.")
    else:
        log.debug("Generating triangle tangent images.")
# ERP image size
depthmap_enable = False
if len(erp_image.shape) == 3:
if np.shape(erp_image)[2] == 4:
log.info("project ERP image is 4 channels RGB map")
erp_image = erp_image[:, :, 0:3]
log.info("project ERP image 3 channels RGB map")
elif len(erp_image.shape) == 2:
log.info("project ERP image is single channel depth map")
erp_image = np.expand_dims(erp_image, axis=2)
depthmap_enable = True
erp_image_height = np.shape(erp_image)[0]
erp_image_width = np.shape(erp_image)[1]
    channel_number = np.shape(erp_image)[2]
# repository: ldn-softdev/pyeapi
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Module for working with EOS VLAN resources
The Vlans resource provides configuration of VLAN resources for an EOS
node.
Parameters:
name (string): The name parameter maps to the VLAN name in EOS. Valid
values include any consecutive sequence of numbers, letters and
underscore up to the maximum number of characters. This parameter
is defaultable.
state (string): The state parameter sets the operational state of
the VLAN on the node. It has two valid values: active or suspend.
The state parameter is defaultable.
trunk_groups (array): The trunk_groups parameter provides a list of
trunk groups configured for this VLAN. This parameter is
defaultable.
"""
import re
from pyeapi.api import EntityCollection
from pyeapi.utils import make_iterable
VLAN_ID_RE = re.compile(r'(?:vlan\s)(?P<value>.*)$', re.M)
NAME_RE = re.compile(r'(?:name\s)(?P<value>.*)$', re.M)
STATE_RE = re.compile(r'(?:state\s)(?P<value>.*)$', re.M)
TRUNK_GROUP_RE = re.compile(r'(?:trunk\sgroup\s)(?P<value>.*)$', re.M)
def isvlan(value):
"""Checks if the argument is a valid VLAN
A valid VLAN is an integer value in the range of 1 to 4094. This
function will test if the argument falls into the specified range and
is considered a valid VLAN
Args:
value: The value to check if is a valid VLAN
Returns:
True if the supplied value is a valid VLAN otherwise False
"""
try:
value = int(value)
return value in range(1, 4095)
except ValueError:
return False
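# Hedged examples: isvlan() accepts anything int()-coercible and checks the
# inclusive range 1..4094.
#
#   assert isvlan('100') is True
#   assert isvlan(4095) is False
#   assert isvlan('vlan10') is False   # int() raises ValueError -> False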
class Vlans(EntityCollection):
"""The Vlans class provides a configuration resource for VLANs
    The Vlans class is derived from EntityCollection and provides a standard
    set of methods for working with VLAN configurations on an EOS node.
"""
def get(self, value):
"""Returns the VLAN configuration as a resource dict.
Args:
            value (string): The vlan identifier to retrieve from the
                running configuration. Valid values are in the range
                of 1 to 4094
Returns:
A Python dict object containing the VLAN attributes as
key/value pairs.
"""
config = self.get_block('vlan %s' % value)
if not config:
return None
response = dict(vlan_id=self._parse_vlan_id(config))
response.update(self._parse_name(config))
response.update(self._parse_state(config))
response.update(self._parse_trunk_groups(config))
return response
def _parse_vlan_id(self, config):
""" _parse_vlan_id scans the provided configuration block and extracts
the vlan id. The config block is expected to always return the
vlan id. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vlan configuration block from the nodes running
configuration
Returns:
Str: vlan id (or range/list of vlan ids)
"""
value = VLAN_ID_RE.search(config).group('value')
return value
def _parse_name(self, config):
""" _parse_name scans the provided configuration block and extracts
the vlan name. The config block is expected to always return the
vlan name. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vlan configuration block from the nodes running
configuration
Returns:
dict: resource dict attribute
"""
value = NAME_RE.search(config).group('value')
return dict(name=value)
def _parse_state(self, config):
""" _parse_state scans the provided configuration block and extracts
the vlan state value. The config block is expected to always return
        the vlan state config. The return dict is intended to be merged into
the response dict.
Args:
config (str): The vlan configuration block from the nodes
running configuration
Returns:
dict: resource dict attribute
"""
value = STATE_RE.search(config).group('value')
return dict(state=value)
def _parse_trunk_groups(self, config):
""" _parse_trunk_groups scans the provided configuration block and
extracts all the vlan trunk groups. If no trunk groups are configured
        an empty list is returned as the value. The return dict is intended
to be merged into the response dict.
Args:
            config (str): The vlan configuration block from the node's
running configuration
Returns:
dict: resource dict attribute
"""
values = TRUNK_GROUP_RE.findall(config)
return dict(trunk_groups=values)
def getall(self):
"""Returns a dict object of all Vlans in the running-config
Returns:
A dict object of Vlan attributes
"""
# regex to find standalone and grouped (ranged, enumerated) vlans (#197)
vlans_re = re.compile(r'(?<=^vlan\s)[\d,\-]+', re.M)
response = dict()
for vid in vlans_re.findall(self.config):
response[vid] = self.get(vid)
return response
def create(self, vid):
""" Creates a new VLAN resource
Args:
vid (str): The VLAN ID to create
Returns:
True if create was successful otherwise False
"""
command = 'vlan %s' % vid
return self.configure(command) if isvlan(vid) else False
def delete(self, vid):
""" Deletes a VLAN from the running configuration
Args:
vid (str): The VLAN ID to delete
Returns:
True if the operation was successful otherwise False
"""
command = 'no vlan %s' % vid
return self.configure(command) if isvlan(vid) else False
def default(self, vid):
""" Defaults the VLAN configuration
.. code-block:: none
default vlan <vlanid>
Args:
vid (str): The VLAN ID to default
Returns:
True if the operation was successful otherwise False
"""
command = 'default vlan %s' % vid
return self.configure(command) if isvlan(vid) else False
def configure_vlan(self, vid, commands):
""" Configures the specified Vlan using commands
Args:
vid (str): The VLAN ID to configure
commands: The list of commands to configure
Returns:
True if the commands completed successfully
"""
commands = make_iterable(commands)
commands.insert(0, 'vlan %s' % vid)
return self.configure(commands)
def set_name(self, vid, name=None, default=False, disable=False):
""" Configures the VLAN name
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to Configures
name (str): The value to configure the vlan name
default (bool): Defaults the VLAN ID name
disable (bool): Negates the VLAN ID name
Returns:
True if the operation was successful otherwise False
"""
cmds = self.command_builder('name', value=name, default=default,
disable=disable)
return self.configure_vlan(vid, cmds)
def set_state(self, vid, value=None, default=False, disable=False):
""" Configures the VLAN state
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to configure
value (str): The value to set the vlan state to
default (bool): Configures the vlan state to its default value
disable (bool): Negates the vlan state
Returns:
True if the operation was successful otherwise False
"""
cmds = self.command_builder('state', value=value, default=default,
disable=disable)
return self.configure_vlan(vid, cmds)
def set_trunk_groups(self, vid, value=None, default=False, disable=False):
""" Configures the list of trunk groups support on a vlan
This method handles configuring the vlan trunk group value to default
if the default flag is set to True. If the default flag is set
to False, then this method will calculate the set of trunk
group names to be added and to be removed.
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to configure
value (str): The list of trunk groups that should be configured
for this vlan id.
default (bool): Configures the trunk group value to default if
this value is true
disable (bool): Negates the trunk group value if set to true
Returns:
True if the operation was successful otherwise False
"""
if default:
return self.configure_vlan(vid, 'default trunk group')
if disable:
return self.configure_vlan(vid, 'no trunk group')
current_value = self.get(vid)['trunk_groups']
failure = False
value = make_iterable(value)
for name in set(value).difference(current_value):
if not self.add_trunk_group(vid, name):
failure = True
for name in set(current_value).difference(value):
if not self.remove_trunk_group(vid, name):
failure = True
return not failure
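# A hedged usage sketch (profile name, VLAN id and trunk groups are
# illustrative; assumes a reachable eAPI node):
#
#   import pyeapi
#   node = pyeapi.connect_to('veos01')
#   vlans = node.api('vlans')
#   vlans.create('100')
#   vlans.set_name('100', name='engineering')
#   # set_trunk_groups diffs the desired list against the running config,
#   # issuing only the needed add/remove trunk group commands:
#   vlans.set_trunk_groups('100', ['tg1', 'tg2'])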
# python-shell/src/test/test_gaffer_operations.py
#
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import unittest
from gafferpy import gaffer as g
class GafferOperationsTest(unittest.TestCase):
examples = [
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.add.AddElements",
"validate" : true,
"skipInvalidElements" : false,
"input" : [ {
"group" : "entity",
"vertex" : 6,
"properties" : {
"count" : 1
},
"class" : "uk.gov.gchq.gaffer.data.element.Entity"
}, {
"group" : "edge",
"source" : 5,
"destination" : 6,
"directed" : true,
"properties" : {
"count" : 1
},
"class" : "uk.gov.gchq.gaffer.data.element.Edge"
} ]
}
''',
g.AddElements(
skip_invalid_elements=False,
input=[
g.Entity(
vertex=6,
properties={'count': 1},
group="entity"
),
g.Edge(
destination=6,
source=5,
group="edge",
properties={'count': 1},
directed=True
)
],
validate=True
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.add.AddElementsFromFile",
"filename" : "filename",
"elementGenerator" : "uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator",
"parallelism" : 1,
"validate" : true,
"skipInvalidElements" : false
}
''',
g.AddElementsFromFile(
parallelism=1,
validate=True,
element_generator="uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator",
filename="filename",
skip_invalid_elements=False
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.add.AddElementsFromKafka",
"topic" : "topic1",
"groupId" : "groupId1",
"bootstrapServers" : [ "hostname1:8080,hostname2:8080" ],
"elementGenerator" : "uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator",
"parallelism" : 1,
"validate" : true,
"skipInvalidElements" : false
}
''',
g.AddElementsFromKafka(
topic="topic1",
parallelism=1,
skip_invalid_elements=False,
validate=True,
bootstrap_servers=[
"hostname1:8080,hostname2:8080"
],
element_generator="uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator",
group_id="groupId1"
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.add.AddElementsFromSocket",
"hostname" : "localhost",
"port" : 8080,
"elementGenerator" : "uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator",
"parallelism" : 1,
"validate" : true,
"skipInvalidElements" : false,
"delimiter" : ","
}
''',
g.AddElementsFromSocket(
validate=True,
element_generator="uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator",
parallelism=1,
delimiter=",",
hostname="localhost",
skip_invalid_elements=False,
port=8080
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.CountGroups"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.CountGroups()
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.CountGroups",
"limit" : 5
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.CountGroups(
limit=5
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport",
"key" : "ALL"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.ExportToGafferResultCache(),
g.DiscardOutput(),
g.GetGafferResultCacheExport(
key="ALL"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.job.GetJobDetails"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.ExportToGafferResultCache(),
g.DiscardOutput(),
g.GetJobDetails()
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport",
"jobId" : "0f47bc2a-547d-4990-9104-04a8dd64e588",
"key" : "ALL"
} ]
}
''',
g.OperationChain(
operations=[
g.GetGafferResultCacheExport(
job_id="0f47bc2a-547d-4990-9104-04a8dd64e588",
key="ALL"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache",
"key" : "edges"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache",
"key" : "entities"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.GetExports",
"getExports" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport",
"key" : "edges"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport",
"key" : "entities"
} ]
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.ExportToGafferResultCache(
key="edges"
),
g.DiscardOutput(),
g.GetAllElements(),
g.ExportToGafferResultCache(
key="entities"
),
g.DiscardOutput(),
g.GetExports(
get_exports=[
g.GetGafferResultCacheExport(
key="edges"
),
g.GetGafferResultCacheExport(
key="entities"
)
]
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherAuthorisedGraph",
"graphId" : "graph2"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group="edge"
)
],
entities=[
]
)
),
g.ExportToOtherAuthorisedGraph(
graph_id="graph2"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherAuthorisedGraph",
"graphId" : "newGraphId",
"parentSchemaIds" : [ "schemaId1" ],
"parentStorePropertiesId" : "storePropsId1"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
entities=[
],
edges=[
g.ElementDefinition(
group="edge"
)
]
)
),
g.ExportToOtherAuthorisedGraph(
parent_schema_ids=[
"schemaId1"
],
graph_id="newGraphId",
parent_store_properties_id="storePropsId1"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"graphId" : "newGraphId"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
entities=[
],
edges=[
g.ElementDefinition(
group="edge"
)
]
)
),
g.ExportToOtherGraph(
graph_id="newGraphId"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"graphId" : "newGraphId",
"schema" : {
"edges" : {
"edge" : {
"properties" : {
"count" : "int"
},
"groupBy" : [ ],
"directed" : "true",
"source" : "int",
"destination" : "int"
}
},
"entities" : {
"entity" : {
"properties" : {
"count" : "int"
},
"groupBy" : [ ],
"vertex" : "int"
}
},
"types" : {
"int" : {
"aggregateFunction" : {
"class" : "uk.gov.gchq.koryphe.impl.binaryoperator.Sum"
},
"class" : "java.lang.Integer"
},
"true" : {
"validateFunctions" : [ {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsTrue"
} ],
"class" : "java.lang.Boolean"
}
}
},
"storeProperties" : {
"accumulo.instance" : "someInstanceName",
"gaffer.cache.service.class" : "uk.gov.gchq.gaffer.cache.impl.HashMapCacheService",
"accumulo.password" : "password",
"accumulo.zookeepers" : "aZookeeper",
"gaffer.store.class" : "uk.gov.gchq.gaffer.accumulostore.MockAccumuloStore",
"gaffer.store.job.tracker.enabled" : "true",
"gaffer.store.operation.declarations" : "ExportToOtherGraphOperationDeclarations.json",
"gaffer.store.properties.class" : "uk.gov.gchq.gaffer.accumulostore.AccumuloProperties",
"accumulo.user" : "user01"
}
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group="edge"
)
],
entities=[
]
)
),
g.ExportToOtherGraph(
schema={'edges': {
'edge': {'groupBy': [], 'directed': 'true',
'properties': {'count': 'int'},
'destination': 'int', 'source': 'int'}},
'entities': {
'entity': {'groupBy': [], 'vertex': 'int',
'properties': {'count': 'int'}}},
'types': {'true': {'validateFunctions': [{
'class': 'uk.gov.gchq.koryphe.impl.predicate.IsTrue'}],
'class': 'java.lang.Boolean'},
'int': {'aggregateFunction': {
'class': 'uk.gov.gchq.koryphe.impl.binaryoperator.Sum'},
'class': 'java.lang.Integer'}}},
store_properties={
'gaffer.store.job.tracker.enabled': 'true',
'gaffer.cache.service.class': 'uk.gov.gchq.gaffer.cache.impl.HashMapCacheService',
'gaffer.store.properties.class': 'uk.gov.gchq.gaffer.accumulostore.AccumuloProperties',
'accumulo.instance': 'someInstanceName',
'accumulo.zookeepers': 'aZookeeper',
'accumulo.password': 'password',
'gaffer.store.operation.declarations': 'ExportToOtherGraphOperationDeclarations.json',
'accumulo.user': 'user01',
'gaffer.store.class': 'uk.gov.gchq.gaffer.accumulostore.MockAccumuloStore'},
graph_id="newGraphId"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"graphId" : "otherGafferRestApiGraphId",
"storeProperties" : {
"gaffer.host" : "localhost",
"gaffer.context-root" : "/rest/v1",
"gaffer.store.class" : "uk.gov.gchq.gaffer.proxystore.ProxyStore",
"gaffer.port" : "8081",
"gaffer.store.properties.class" : "uk.gov.gchq.gaffer.proxystore.ProxyProperties"
}
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
entities=[
],
edges=[
g.ElementDefinition(
group="edge"
)
]
)
),
g.ExportToOtherGraph(
graph_id="otherGafferRestApiGraphId",
store_properties={'gaffer.context-root': '/rest/v1',
'gaffer.store.class': 'uk.gov.gchq.gaffer.proxystore.ProxyStore',
'gaffer.host': 'localhost',
'gaffer.store.properties.class': 'uk.gov.gchq.gaffer.proxystore.ProxyProperties',
'gaffer.port': '8081'}
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"graphId" : "exportGraphId"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group="edge"
)
],
entities=[
]
)
),
g.ExportToOtherGraph(
graph_id="exportGraphId"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : { }
},
"entities" : { }
}
}, {
"class" : "uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph",
"graphId" : "newGraphId",
"parentSchemaIds" : [ "exportSchemaId" ],
"parentStorePropertiesId" : "exportStorePropertiesId"
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group="edge"
)
],
entities=[
]
)
),
g.ExportToOtherGraph(
parent_schema_ids=[
"exportSchemaId"
],
graph_id="newGraphId",
parent_store_properties_id="exportStorePropertiesId"
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport",
"start" : 0
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.ExportToSet(),
g.DiscardOutput(),
g.GetSetExport(
start=0
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" | |
<reponame>Tony1527/playsound
class PlaysoundException(Exception):
pass
def _playsoundWin(sound, block = True):
'''
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) <NAME> <<EMAIL>>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
'''
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def winCommand(*command):
buf = c_buffer(255)
command = ' '.join(command).encode(getfilesystemencoding())
errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if errorCode:
errorBuffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command.decode() +
'\n ' + errorBuffer.value.decode())
raise PlaysoundException(exceptionMessage)
return buf.value
alias = 'playsound_' + str(random())
winCommand('open "' + sound + '" alias', alias)
winCommand('set', alias, 'time format milliseconds')
durationInMS = winCommand('status', alias, 'length')
winCommand('play', alias, 'from 0 to', durationInMS.decode())
if block:
sleep(float(durationInMS) / 1000.0)
def _playsoundOSX(sound, block = True):
'''
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
'''
from AppKit import NSSound
from Foundation import NSURL
from time import sleep
if '://' not in sound:
if not sound.startswith('/'):
from os import getcwd
sound = getcwd() + '/' + sound
sound = 'file://' + sound
url = NSURL.URLWithString_(sound)
nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
if not nssound:
raise IOError('Unable to load sound named: ' + sound)
nssound.play()
if block:
sleep(nssound.duration())
def _playsoundNix(sound, block=True):
"""Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
"""
if not block:
raise NotImplementedError(
"block=False cannot be used on this platform yet")
# pathname2url escapes non-URL-safe characters
import os
try:
from urllib.request import pathname2url
except ImportError:
# python 2
from urllib import pathname2url
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
playbin = Gst.ElementFactory.make('playbin', 'playbin')
if sound.startswith(('http://', 'https://')):
playbin.props.uri = sound
else:
playbin.props.uri = 'file://' + pathname2url(os.path.abspath(sound))
set_result = playbin.set_state(Gst.State.PLAYING)
if set_result != Gst.StateChangeReturn.ASYNC:
raise PlaysoundException(
"playbin.set_state returned " + repr(set_result))
# FIXME: use some other bus method than poll() with block=False
# https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html
bus = playbin.get_bus()
bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
playbin.set_state(Gst.State.NULL)
from platform import system
system = system()
if system == 'Windows':
playsound = _playsoundWin
elif system == 'Darwin':
playsound = _playsoundOSX
else:
playsound = _playsoundNix
del system
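# Public entry point example (a sketch; 'beep.mp3' is a hypothetical file):
#
#   from playsound import playsound
#   playsound('beep.mp3')               # blocks until playback finishes
#   playsound('beep.mp3', block=False)  # returns immediately (Windows/macOS)
#
# Note: everything below this point imports windll/winmm at module level,
# so this fork's extensions appear to be Windows-only; importing the module
# on other platforms would likely fail on `from ctypes import windll`.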
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def winCommand(*command):
buf = c_buffer(255)
command = ' '.join(command).encode(getfilesystemencoding())
errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if errorCode:
errorBuffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command.decode() +
'\n ' + errorBuffer.value.decode())
raise PlaysoundException(exceptionMessage)
return buf.value
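# winCommand() forwards MCI command strings to winmm, e.g. (the alias name
# and file are hypothetical):
#
#   winCommand('open "song.mp3" alias', 'my_alias')
#   winCommand('set', 'my_alias', 'time format milliseconds')
#   winCommand('play', 'my_alias', 'from 0 to 1000')
#   winCommand('close my_alias')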
from threading import Thread,Event,Lock
from queue import Queue,Empty
from collections import deque
'''
Music class that uses the Windows MCI interface to play music
'''
class _music(object):
__alias=None
__running_idx=None
__sound=None
__start=None
__end=None
__is_repeat=False
__id=-1
music_list=None
'''
initialize the music object
'''
def __init__(self,sound,id):
self.__alias=['','']
self.__running_idx=0
self.__id=id
self.preload(sound)
def set_music_list(self,music_list):
self.music_list = music_list
def __eq__(self,value):
return self.__id==value
'''
clear the music object
music will be closed
'''
def close(self):
self.stop()
self.__clear()
'''
get id of music
music will not be affected
'''
def get_id(self):
return self.__id
'''
return whether the music plays repeatedly
music will not be affected
'''
def is_repeat(self):
return self.__is_repeat
'''
return the range from start to end
music will not be affected
'''
def length(self):
if self.__check_alias():
return self.__end-self.__start
'''
return the mode of the music object
music will not be affected
'''
def mode(self):
if self.__check_alias():
return winCommand('status',self.__get_alias(),'mode').decode()
'''
pause the music
music will be paused
'''
def pause(self):
if self.__check_alias():
winCommand('pause '+self.__get_alias())
'''
play the music from start to end
music will be playing
'''
def play(self,start=0,end=-1):
self.__start,self.__end=self.__parse_start_end(start,end,self.total_length())
self.__play_implement(self.__start,self.__end)
'''
return the position of the music
music will not be affected
'''
def position(self):
if self.__check_alias():
return int(winCommand('status',self.__get_alias(),'position').decode())
'''
preload the music information
'''
def preload(self,sound):
self.__sound=sound
for i in range(2):
self.__alias[i]='playsound_'+str(random())
winCommand('open "'+self.__sound+'" alias',self.__alias[i])
winCommand('set',self.__alias[i],'time format milliseconds')
length=self.total_length()
self.__start=0
self.__end=length
return length
'''
resume playing
music will be playing
'''
def resume(self):
if self.__check_alias():
if self.__is_repeat:
self.__play_implement(self.position(),self.__end)
else:
winCommand('resume '+self.__get_alias())
'''
seek the music to pos.
music will be paused
'''
def seek(self,pos):
if self.__check_alias():
if pos>self.__end or pos<self.__start:
raise PlaysoundException('position exceeds range')
winCommand('seek',self.__get_alias(),'to',str(pos))
winCommand('play',self.__get_alias(),'from '+ str(pos) +' to',str(self.__end))
self.pause()
'''
set repeat flag of the music
music will play repeatedly
'''
def set_repeat(self,repeat):
self.__is_repeat=repeat
'''
set id for music object
music will not be affected
'''
def set_id(self,id):
self.__id=id
'''
stop the music.
music will be stopped
'''
def stop(self):
if self.__check_alias():
self.seek(self.__start)
winCommand('stop '+self.__get_alias())
'''
total length of the music object; unlike length(), which covers only the
start-to-end range, total_length() covers the whole track.
music will not be affected
'''
def total_length(self):
if self.__check_alias():
return int(winCommand('status',self.__get_alias(),'length').decode())
'''
update the playback mode of the music; when repeating, restart as the end nears
'''
def update_mode(self,delay=0):
mod = self.mode()
if mod =='playing':
#if the remaining time self.__end-self.position() is within delay, restart on the alternate alias
if self.__is_repeat==True:
if self.__end-self.position()<=delay:
self.__running_idx=(self.__running_idx+1)%2
self.__play_implement(self.__start,self.__end)
return mod
def __get_alias(self):
return self.__alias[self.__running_idx]
def __check_alias(self):
if self.__get_alias()!='':
return True
def __parse_start_end(self,start,end,length):
if not (isinstance(start,int) and isinstance(end,int)):
raise PlaysoundException('start and end must be int')
_start=0
_end=0
if end==-1:
_end = length
elif end<=length:
_end = end
else:
raise PlaysoundException('music range exceeds limits')
if start<0 or start>length:
raise PlaysoundException('music range exceeds limits')
elif _end<start:
raise PlaysoundException('end must be greater than start')
else:
_start=start
return _start,_end
def __del__(self):
self.__clear()
def __clear(self):
if self.__check_alias():
for i in range(2):
winCommand('close '+self.__alias[i])
self.__alias=['','']
self.__start=None
self.__end=None
self.__is_repeat=False
def __play_implement(self,start,end):
winCommand('play',self.__get_alias(),'from '+ str(start) +' to',str(end))
def print(self):
if self.__check_alias():
def format_miliseconds(t):
return '%d:%d:%d.%d'%(t//3600000,(t%3600000)//60000,(t%60000)//1000,t%1000)
print('music name:',self.__sound)
print('mode:',self.mode())
print('total_length:',self.total_length())
print('position:',str(self.position()))
print('start - end: {} - {}'.format(format_miliseconds(self.__start),format_miliseconds(self.__end)))
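# A minimal usage sketch for _music (Windows only; 'song.mp3' and the id
# are hypothetical):
#
#   m = _music('song.mp3', id=0)
#   m.set_repeat(True)
#   m.play()                  # plays the full track
#   m.pause(); m.resume()
#   m.seek(m.length() // 2)   # note: seek() leaves the music paused
#   m.close()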
'''
singleton
'''
class _singleton(object):
_mutex=Lock()
def __init__(self):
pass
@classmethod
def GetInstance(cls,*args,**kwargs):
if not hasattr(cls,'_instance'):
cls._mutex.acquire()
if not hasattr(cls,'_instance'):
cls._instance = cls()
print('create instance',cls._instance)
cls._mutex.release()
return cls._instance
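# Double-checked locking: the lock is only taken while no instance exists,
# so later GetInstance() calls are cheap. Usage sketch (the subclass is
# hypothetical):
#
#   class _manager(_singleton):
#       pass
#   assert _manager.GetInstance() is _manager.GetInstance()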
'''
music tag is used to send messages to the music manager
'''
class _music_tag(object):
id=-1 #id is the connection between music player and _music object
operator='' #operator of _music object
args=None #parameters
block_event=None
block=False
retval=None #return value for some methods of music player
music_list=None #special deal with music list
def __init__(self,id,operator,block=False,*args):
self.id=id
self.operator = operator
self.args = args
if block:
self.block_event=Event()
self.block=True
def set_music_list(self,music_list):
self.music_list = music_list
'''
music player is the client that sends music tags to the music manager,
which actually plays the music. A music player controls a music object
once it has been opened.
'''
class music_player(object):
__id=-1 #identity of every _music object
__music=None #sound
static_id=0 #static variables
mutex=Lock() #lock of static_id
music_list=None #this music player belong to which music list
def __init__(self,music_list=None):
'''
if this music player belongs to a music list, set music_list;
otherwise the music_list parameter can be ignored
'''
self.music_list = music_list
def get_music(self):
'''
get name of sound
'''
return self.__music
def close(self):
'''
close sound
'''
self.__send('close',False)
self.__id=-1
def length(self):
'''
get the length of music.
@warning: this method blocks the current thread until the music manager responds
'''
return self.__send('length',True)
def mode(self):
'''
get the mode of music.
@warning: this method blocks the current thread until the music manager responds
'''
return self.__send('mode',True)
def open(self,music):
'''
open the music
'''
self.__music=music
self.mutex.acquire()
self.__id=music_player.static_id
music_player.static_id=music_player.static_id+1
self.mutex.release()
self.__send('open',False,self.__music,self.__id)
def pause(self):
'''
pause the music
'''
self.__send('pause',False)
def play(self,start=0,end=-1):
'''
play the music
'''
self.__send('play',False,start,end)
def position(self):
'''
get the position of the music.
@warning: this method blocks the current thread until the music manager responds
'''
return self.__send('position',True)
def resume(self):
'''
resume the music
'''
self.__send('resume',False)
| |
},
{
'name': 'dsn_params_ports_mismatch_dsn_multi_hosts',
'dsn': 'postgresql://host1,host2,host3/db',
'port': [111, 222],
'error': (
exceptions.InterfaceError,
'could not match 2 port numbers to 3 hosts'
)
},
{
'name': 'dsn_only_quoted_unix_host_port_in_params',
'dsn': 'postgres://user@?port=56226&host=%2Ftmp',
'result': (
[os.path.join('/tmp', '.s.PGSQL.56226')],
{
'user': 'user',
'database': 'user',
'sslmode': SSLMode.disable,
'ssl': None
}
)
},
{
'name': 'dsn_only_cloudsql',
'dsn': 'postgres:///db?host=/cloudsql/'
'project:region:instance-name&user=spam',
'result': (
[os.path.join(
'/cloudsql/project:region:instance-name',
'.s.PGSQL.5432'
)], {
'user': 'spam',
'database': 'db'
}
)
},
{
'name': 'dsn_only_cloudsql_unix_and_tcp',
'dsn': 'postgres:///db?host=127.0.0.1:5432,/cloudsql/'
'project:region:instance-name,localhost:5433&user=spam',
'result': (
[
('127.0.0.1', 5432),
os.path.join(
'/cloudsql/project:region:instance-name',
'.s.PGSQL.5432'
),
('localhost', 5433)
], {
'user': 'spam',
'database': 'db',
'ssl': True,
'sslmode': SSLMode.prefer,
}
)
},
]
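# Each testcase above supplies connection inputs ('dsn', 'host', 'port',
# 'user', 'passfile', 'env', ...) plus exactly one of 'result' (the expected
# (addrs, params) pair) or 'error'; run_testcase() below enforces that
# exactly one of those two keys is present.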
@contextlib.contextmanager
def environ(self, **kwargs):
old_vals = {}
for key in kwargs:
if key in os.environ:
old_vals[key] = os.environ[key]
for key, val in kwargs.items():
if val is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = val
try:
yield
finally:
for key in kwargs:
if key in os.environ:
del os.environ[key]
for key, val in old_vals.items():
os.environ[key] = val
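# Usage sketch (keys are illustrative): values passed as None are removed
# from os.environ for the duration of the block, and everything is restored
# on exit:
#
#   with self.environ(PGUSER='u', PGPASSWORD=None):
#       ...  # os.environ['PGUSER'] == 'u'; PGPASSWORD is unset here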
def run_testcase(self, testcase):
env = testcase.get('env', {})
test_env = {'PGHOST': None, 'PGPORT': None,
'PGUSER': None, 'PGPASSWORD': None,
'PGDATABASE': None, 'PGSSLMODE': None}
test_env.update(env)
dsn = testcase.get('dsn')
user = testcase.get('user')
port = testcase.get('port')
host = testcase.get('host')
password = testcase.get('password')
passfile = testcase.get('passfile')
database = testcase.get('database')
sslmode = testcase.get('ssl')
server_settings = testcase.get('server_settings')
expected = testcase.get('result')
expected_error = testcase.get('error')
if expected is None and expected_error is None:
raise RuntimeError(
'invalid test case: either "result" or "error" key '
'has to be specified')
if expected is not None and expected_error is not None:
raise RuntimeError(
'invalid test case: either "result" or "error" key '
'has to be specified, got both')
with contextlib.ExitStack() as es:
es.enter_context(self.subTest(dsn=dsn, env=env))
es.enter_context(self.environ(**test_env))
if expected_error:
es.enter_context(self.assertRaisesRegex(*expected_error))
addrs, params = connect_utils._parse_connect_dsn_and_args(
dsn=dsn, host=host, port=port, user=user, password=password,
passfile=passfile, database=database, ssl=sslmode,
connect_timeout=None, server_settings=server_settings)
params = {
k: v for k, v in params._asdict().items()
if v is not None or (expected is not None and k in expected[1])
}
if isinstance(params.get('ssl'), ssl.SSLContext):
params['ssl'] = True
result = (addrs, params)
if expected is not None:
if 'ssl' not in expected[1]:
# Avoid the hassle of specifying the default SSL mode
# unless explicitly tested for.
params.pop('ssl', None)
params.pop('sslmode', None)
self.assertEqual(expected, result, 'Testcase: {}'.format(testcase))
def test_test_connect_params_environ(self):
self.assertNotIn('AAAAAAAAAA123', os.environ)
self.assertNotIn('AAAAAAAAAA456', os.environ)
self.assertNotIn('AAAAAAAAAA789', os.environ)
try:
os.environ['AAAAAAAAAA456'] = '123'
os.environ['AAAAAAAAAA789'] = '123'
with self.environ(AAAAAAAAAA123='1',
AAAAAAAAAA456='2',
AAAAAAAAAA789=None):
self.assertEqual(os.environ['AAAAAAAAAA123'], '1')
self.assertEqual(os.environ['AAAAAAAAAA456'], '2')
self.assertNotIn('AAAAAAAAAA789', os.environ)
self.assertNotIn('AAAAAAAAAA123', os.environ)
self.assertEqual(os.environ['AAAAAAAAAA456'], '123')
self.assertEqual(os.environ['AAAAAAAAAA789'], '123')
finally:
for key in {'AAAAAAAAAA123', 'AAAAAAAAAA456', 'AAAAAAAAAA789'}:
if key in os.environ:
del os.environ[key]
def test_test_connect_params_run_testcase(self):
with self.environ(PGPORT='777'):
self.run_testcase({
'env': {
'PGUSER': '__test__'
},
'host': 'abc',
'result': (
[('abc', 5432)],
{'user': '__test__', 'database': '__test__'}
)
})
def test_connect_params(self):
for testcase in self.TESTS:
self.run_testcase(testcase)
def test_connect_pgpass_regular(self):
passfile = tempfile.NamedTemporaryFile('w+t', delete=False)
passfile.write(textwrap.dedent(R'''
abc:*:*:user:password from pgpass for user@abc
localhost:*:*:*:password from pgpass for localhost
cde:5433:*:*:password from pgpass for cde:5433
*:*:*:testuser:password from pgpass for testuser
*:*:testdb:*:password from pgpass for testdb
# comment
*:*:test\:db:test\\:password from pgpass with escapes
'''))
passfile.close()
os.chmod(passfile.name, stat.S_IWUSR | stat.S_IRUSR)
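# .pgpass lines are hostname:port:database:username:password; '*' matches
# any value and backslash escapes ':' or '\' (exercised by the last entry).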
try:
# passfile path in env
self.run_testcase({
'env': {
'PGPASSFILE': passfile.name
},
'host': 'abc',
'user': 'user',
'database': 'db',
'result': (
[('abc', 5432)],
{
'password': 'password from pgpass for user@abc',
'user': 'user',
'database': 'db',
}
)
})
# passfile path as explicit arg
self.run_testcase({
'host': 'abc',
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
[('abc', 5432)],
{
'password': 'password from pgpass for user@abc',
'user': 'user',
'database': 'db',
}
)
})
# passfile path in dsn
self.run_testcase({
'dsn': 'postgres://user@abc/db?passfile={}'.format(
passfile.name),
'result': (
[('abc', 5432)],
{
'password': 'password from pgpass for user@abc',
'user': 'user',
'database': 'db',
}
)
})
self.run_testcase({
'host': 'localhost',
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
[('localhost', 5432)],
{
'password': 'password from pgpass for localhost',
'user': 'user',
'database': 'db',
}
)
})
if _system != 'Windows':
# unix socket gets normalized as localhost
self.run_testcase({
'host': '/tmp',
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
['/tmp/.s.PGSQL.5432'],
{
'password': 'password from pgpass for localhost',
'user': 'user',
'database': 'db',
}
)
})
# port matching (also tests that `:` can be part of password)
self.run_testcase({
'host': 'cde',
'port': 5433,
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
[('cde', 5433)],
{
'password': 'password from pgpass for cde:5433',
'user': 'user',
'database': 'db',
}
)
})
# user matching
self.run_testcase({
'host': 'def',
'user': 'testuser',
'database': 'db',
'passfile': passfile.name,
'result': (
[('def', 5432)],
{
'password': 'password from pgpass for testuser',
'user': 'testuser',
'database': 'db',
}
)
})
# database matching
self.run_testcase({
'host': 'efg',
'user': 'user',
'database': 'testdb',
'passfile': passfile.name,
'result': (
[('efg', 5432)],
{
'password': 'password from pgpass for testdb',
'user': 'user',
'database': 'testdb',
}
)
})
# test escaping
self.run_testcase({
'host': 'fgh',
'user': R'test\\',
'database': R'test\:db',
'passfile': passfile.name,
'result': (
[('fgh', 5432)],
{
'password': 'password from pgpass with escapes',
'user': R'test\\',
'database': R'test\:db',
}
)
})
finally:
os.unlink(passfile.name)
@unittest.skipIf(_system == 'Windows', 'no mode checking on Windows')
def test_connect_pgpass_badness_mode(self):
# Verify that .pgpass permissions are checked
with tempfile.NamedTemporaryFile('w+t') as passfile:
os.chmod(passfile.name,
stat.S_IWUSR | stat.S_IRUSR | stat.S_IWGRP | stat.S_IRGRP)
with self.assertWarnsRegex(
UserWarning,
'password file .* has group or world access'):
self.run_testcase({
'host': 'abc',
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
[('abc', 5432)],
{
'user': 'user',
'database': 'db',
}
)
})
def test_connect_pgpass_badness_non_file(self):
# Verify warnings when .pgpass is not a file
with tempfile.TemporaryDirectory() as passfile:
with self.assertWarnsRegex(
UserWarning,
'password file .* is not a plain file'):
self.run_testcase({
'host': 'abc',
'user': 'user',
'database': 'db',
'passfile': passfile,
'result': (
[('abc', 5432)],
{
'user': 'user',
'database': 'db',
}
)
})
def test_connect_pgpass_nonexistent(self):
# nonexistent passfile is OK
self.run_testcase({
'host': 'abc',
'user': 'user',
'database': 'db',
'passfile': 'totally nonexistent',
'result': (
[('abc', 5432)],
{
'user': 'user',
'database': 'db',
}
)
})
@unittest.skipIf(_system == 'Windows', 'no mode checking on Windows')
def test_connect_pgpass_inaccessible_file(self):
with tempfile.NamedTemporaryFile('w+t') as passfile:
os.chmod(passfile.name, stat.S_IWUSR)
# an unreadable passfile is silently ignored
self.run_testcase({
'host': 'abc',
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
[('abc', 5432)],
{
'user': 'user',
'database': 'db',
}
)
})
@unittest.skipIf(_system == 'Windows', 'no mode checking on Windows')
def test_connect_pgpass_inaccessible_directory(self):
with tempfile.TemporaryDirectory() as passdir:
with tempfile.NamedTemporaryFile('w+t', dir=passdir) as passfile:
os.chmod(passdir, stat.S_IWUSR)
try:
# a passfile in an inaccessible directory is silently ignored
self.run_testcase({
'host': 'abc',
'user': 'user',
'database': 'db',
'passfile': passfile.name,
'result': (
[('abc', 5432)],
{
'user': 'user',
'database': 'db',
}
)
})
finally:
os.chmod(passdir, stat.S_IRWXU)
async def test_connect_args_validation(self):
for val in {-1, 'a', True, False, 0}:
with self.assertRaisesRegex(ValueError, 'greater than 0'):
await asyncpg.connect(command_timeout=val)
for arg in {'max_cacheable_statement_size',
'max_cached_statement_lifetime',
'statement_cache_size'}:
for val in {None, -1, True, False}:
with self.assertRaisesRegex(ValueError, 'greater or equal'):
await asyncpg.connect(**{arg: val})
class TestConnection(tb.ConnectedTestCase):
async def test_connection_isinstance(self):
self.assertTrue(isinstance(self.con, connection.Connection))
self.assertTrue(isinstance(self.con, object))
self.assertFalse(isinstance(self.con, list))
async def test_connection_use_after_close(self):
def check():
return self.assertRaisesRegex(asyncpg.InterfaceError,
'connection is closed')
await self.con.close()
with check():
await self.con.add_listener('aaa', lambda: None)
with check():
self.con.transaction()
with check():
await self.con.executemany('SELECT 1', [])
with check():
await self.con.set_type_codec('aaa', encoder=None, decoder=None)
with check():
await self.con.set_builtin_type_codec('aaa', codec_name='aaa')
for meth in ('execute', 'fetch', 'fetchval', 'fetchrow',
'prepare', 'cursor'):
with check():
await getattr(self.con, meth)('SELECT 1')
with check():
await self.con.reset()
@unittest.skipIf(os.environ.get('PGHOST'), 'unmanaged cluster')
async def test_connection_ssl_to_no_ssl_server(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.load_verify_locations(SSL_CA_CERT_FILE)
with self.assertRaisesRegex(ConnectionError, 'rejected SSL'):
await self.connect(
host='localhost',
user='ssl_user',
ssl=ssl_context)
@unittest.skipIf(os.environ.get('PGHOST'), 'unmanaged cluster')
async def test_connection_sslmode_no_ssl_server(self):
async def verify_works(sslmode):
con = None
try:
con = await self.connect(
dsn='postgresql://foo/?sslmode=' + sslmode,
user='postgres',
database='postgres',
host='localhost')
self.assertEqual(await con.fetchval('SELECT 42'), 42)
self.assertFalse(con._protocol.is_ssl)
finally:
if con:
await con.close()
async def verify_fails(sslmode):
con = None
try:
with self.assertRaises(ConnectionError):
con = await self.connect(
dsn='postgresql://foo/?sslmode=' + sslmode,
user='postgres',
database='postgres',
host='localhost')
await con.fetchval('SELECT 42')
finally:
if con:
await con.close()
await verify_works('disable')
await verify_works('allow')
await verify_works('prefer')
await verify_fails('require')
await verify_fails('verify-ca')
await verify_fails('verify-full')
async def test_connection_implicit_host(self):
conn_spec = self.get_connection_spec()
con = await asyncpg.connect(
port=conn_spec.get('port'),
database=conn_spec.get('database'),
user=conn_spec.get('user'))
await con.close()
class BaseTestSSLConnection(tb.ConnectedTestCase):
@classmethod
def get_server_settings(cls):
conf = super().get_server_settings()
conf.update({
'ssl': 'on',
'ssl_cert_file': SSL_CERT_FILE,
'ssl_key_file': SSL_KEY_FILE,
'ssl_ca_file': CLIENT_CA_CERT_FILE,
})
return conf
@classmethod
def setup_cluster(cls):
cls.cluster = cls.new_cluster(pg_cluster.TempCluster)
cls.start_cluster(
cls.cluster, server_settings=cls.get_server_settings())
def setUp(self):
super().setUp()
self.cluster.reset_hba()
create_script = []
create_script.append('CREATE ROLE ssl_user WITH LOGIN;')
self._add_hba_entry()
# Put hba changes into effect
self.cluster.reload()
create_script = '\n'.join(create_script)
self.loop.run_until_complete(self.con.execute(create_script))
def tearDown(self):
# Reset cluster's pg_hba.conf since we've meddled with it
self.cluster.trust_local_connections()
drop_script = []
drop_script.append('DROP ROLE ssl_user;')
drop_script = '\n'.join(drop_script)
self.loop.run_until_complete(self.con.execute(drop_script))
super().tearDown()
def _add_hba_entry(self):
raise NotImplementedError()
@unittest.skipIf(os.environ.get('PGHOST'), 'unmanaged cluster')
class TestSSLConnection(BaseTestSSLConnection):
def _add_hba_entry(self):
self.cluster.add_hba_entry(
type='hostssl', address=ipaddress.ip_network('127.0.0.0/24'),
database='postgres', user='ssl_user',
auth_method='trust')
self.cluster.add_hba_entry(
type='hostssl', address=ipaddress.ip_network('::1/128'),
database='postgres', user='ssl_user',
auth_method='trust')
async def test_ssl_connection_custom_context(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.load_verify_locations(SSL_CA_CERT_FILE)
con = await self.connect(
host='localhost',
user='ssl_user',
| |
pulumi.Input[str] message_id: The message id.
:param pulumi.Input[str] message_release: The message release version.
:param pulumi.Input[str] message_version: The message version.
:param pulumi.Input[int] release_indicator: The release indicator.
:param pulumi.Input[int] repetition_separator: The repetition separator.
:param pulumi.Input[int] segment_terminator: The segment terminator.
:param pulumi.Input[str] segment_terminator_suffix: The segment terminator suffix.
:param pulumi.Input[str] target_namespace: The target namespace on which these delimiter settings have to be applied.
"""
if component_separator is not None:
pulumi.set(__self__, "component_separator", component_separator)
if data_element_separator is not None:
pulumi.set(__self__, "data_element_separator", data_element_separator)
if decimal_point_indicator is not None:
pulumi.set(__self__, "decimal_point_indicator", decimal_point_indicator)
if message_association_assigned_code is not None:
pulumi.set(__self__, "message_association_assigned_code", message_association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if release_indicator is not None:
pulumi.set(__self__, "release_indicator", release_indicator)
if repetition_separator is not None:
pulumi.set(__self__, "repetition_separator", repetition_separator)
if segment_terminator is not None:
pulumi.set(__self__, "segment_terminator", segment_terminator)
if segment_terminator_suffix is not None:
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> Optional[pulumi.Input[int]]:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@component_separator.setter
def component_separator(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "component_separator", value)
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> Optional[pulumi.Input[int]]:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@data_element_separator.setter
def data_element_separator(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "data_element_separator", value)
@property
@pulumi.getter(name="decimalPointIndicator")
def decimal_point_indicator(self) -> Optional[pulumi.Input[str]]:
"""
The decimal point indicator.
"""
return pulumi.get(self, "decimal_point_indicator")
@decimal_point_indicator.setter
def decimal_point_indicator(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "decimal_point_indicator", value)
@property
@pulumi.getter(name="messageAssociationAssignedCode")
def message_association_assigned_code(self) -> Optional[pulumi.Input[str]]:
"""
The message association assigned code.
"""
return pulumi.get(self, "message_association_assigned_code")
@message_association_assigned_code.setter
def message_association_assigned_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_association_assigned_code", value)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[pulumi.Input[str]]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@message_id.setter
def message_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_id", value)
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[pulumi.Input[str]]:
"""
The message release version.
"""
return pulumi.get(self, "message_release")
@message_release.setter
def message_release(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_release", value)
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[pulumi.Input[str]]:
"""
The message version.
"""
return pulumi.get(self, "message_version")
@message_version.setter
def message_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_version", value)
@property
@pulumi.getter(name="releaseIndicator")
def release_indicator(self) -> Optional[pulumi.Input[int]]:
"""
The release indicator.
"""
return pulumi.get(self, "release_indicator")
@release_indicator.setter
def release_indicator(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "release_indicator", value)
@property
@pulumi.getter(name="repetitionSeparator")
def repetition_separator(self) -> Optional[pulumi.Input[int]]:
"""
The repetition separator.
"""
return pulumi.get(self, "repetition_separator")
@repetition_separator.setter
def repetition_separator(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "repetition_separator", value)
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> Optional[pulumi.Input[int]]:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@segment_terminator.setter
def segment_terminator(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "segment_terminator", value)
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> Optional[pulumi.Input[str]]:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@segment_terminator_suffix.setter
def segment_terminator_suffix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "segment_terminator_suffix", value)
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[pulumi.Input[str]]:
"""
The target namespace on which these delimiter settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@target_namespace.setter
def target_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_namespace", value)
@pulumi.input_type
class EdifactEnvelopeOverrideArgs:
def __init__(__self__, *,
application_password: Optional[pulumi.Input[str]] = None,
association_assigned_code: Optional[pulumi.Input[str]] = None,
controlling_agency_code: Optional[pulumi.Input[str]] = None,
functional_group_id: Optional[pulumi.Input[str]] = None,
group_header_message_release: Optional[pulumi.Input[str]] = None,
group_header_message_version: Optional[pulumi.Input[str]] = None,
message_association_assigned_code: Optional[pulumi.Input[str]] = None,
message_id: Optional[pulumi.Input[str]] = None,
message_release: Optional[pulumi.Input[str]] = None,
message_version: Optional[pulumi.Input[str]] = None,
receiver_application_id: Optional[pulumi.Input[str]] = None,
receiver_application_qualifier: Optional[pulumi.Input[str]] = None,
sender_application_id: Optional[pulumi.Input[str]] = None,
sender_application_qualifier: Optional[pulumi.Input[str]] = None,
target_namespace: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] application_password: The application password.
:param pulumi.Input[str] association_assigned_code: The association assigned code.
:param pulumi.Input[str] controlling_agency_code: The controlling agency code.
:param pulumi.Input[str] functional_group_id: The functional group id.
:param pulumi.Input[str] group_header_message_release: The group header message release.
:param pulumi.Input[str] group_header_message_version: The group header message version.
:param pulumi.Input[str] message_association_assigned_code: The message association assigned code.
:param pulumi.Input[str] message_id: The message id on which these envelope settings have to be applied.
:param pulumi.Input[str] message_release: The message release version on which these envelope settings have to be applied.
:param pulumi.Input[str] message_version: The message version on which these envelope settings have to be applied.
:param pulumi.Input[str] receiver_application_id: The receiver application id.
:param pulumi.Input[str] receiver_application_qualifier: The receiver application qualifier.
:param pulumi.Input[str] sender_application_id: The sender application id.
:param pulumi.Input[str] sender_application_qualifier: The sender application qualifier.
:param pulumi.Input[str] target_namespace: The target namespace on which these envelope settings have to be applied.
"""
if application_password is not None:
pulumi.set(__self__, "application_password", application_password)
if association_assigned_code is not None:
pulumi.set(__self__, "association_assigned_code", association_assigned_code)
if controlling_agency_code is not None:
pulumi.set(__self__, "controlling_agency_code", controlling_agency_code)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if group_header_message_release is not None:
pulumi.set(__self__, "group_header_message_release", group_header_message_release)
if group_header_message_version is not None:
pulumi.set(__self__, "group_header_message_version", group_header_message_version)
if message_association_assigned_code is not None:
pulumi.set(__self__, "message_association_assigned_code", message_association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if receiver_application_id is not None:
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
if receiver_application_qualifier is not None:
pulumi.set(__self__, "receiver_application_qualifier", receiver_application_qualifier)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if sender_application_qualifier is not None:
pulumi.set(__self__, "sender_application_qualifier", sender_application_qualifier)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="applicationPassword")
def application_password(self) -> Optional[pulumi.Input[str]]:
"""
The application password.
"""
return pulumi.get(self, "application_password")
@application_password.setter
def application_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_password", value)
@property
@pulumi.getter(name="associationAssignedCode")
def association_assigned_code(self) -> Optional[pulumi.Input[str]]:
"""
The association assigned code.
"""
return pulumi.get(self, "association_assigned_code")
@association_assigned_code.setter
def association_assigned_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "association_assigned_code", value)
@property
@pulumi.getter(name="controllingAgencyCode")
def controlling_agency_code(self) -> Optional[pulumi.Input[str]]:
"""
The controlling agency code.
"""
return pulumi.get(self, "controlling_agency_code")
@controlling_agency_code.setter
def controlling_agency_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "controlling_agency_code", value)
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@functional_group_id.setter
def functional_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "functional_group_id", value)
@property
@pulumi.getter(name="groupHeaderMessageRelease")
def group_header_message_release(self) -> Optional[pulumi.Input[str]]:
"""
The group header message release.
"""
return pulumi.get(self, "group_header_message_release")
@group_header_message_release.setter
def group_header_message_release(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_header_message_release", value)
@property
@pulumi.getter(name="groupHeaderMessageVersion")
def group_header_message_version(self) -> Optional[pulumi.Input[str]]:
"""
The group header message version.
"""
return pulumi.get(self, "group_header_message_version")
@group_header_message_version.setter
def group_header_message_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_header_message_version", value)
@property
@pulumi.getter(name="messageAssociationAssignedCode")
def message_association_assigned_code(self) -> Optional[pulumi.Input[str]]:
"""
The message association assigned code.
"""
return pulumi.get(self, "message_association_assigned_code")
@message_association_assigned_code.setter
def message_association_assigned_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_association_assigned_code", value)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[pulumi.Input[str]]:
"""
The message id on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_id")
@message_id.setter
def message_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_id", value)
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[pulumi.Input[str]]:
"""
The message release version on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_release")
@message_release.setter
def message_release(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_release", value)
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[pulumi.Input[str]]:
"""
The message version on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_version")
@message_version.setter
def message_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message_version", value)
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> Optional[pulumi.Input[str]]:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@receiver_application_id.setter
def receiver_application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "receiver_application_id", value)
@property
@pulumi.getter(name="receiverApplicationQualifier")
def receiver_application_qualifier(self) -> Optional[pulumi.Input[str]]:
"""
The receiver application qualifier.
"""
return pulumi.get(self, "receiver_application_qualifier")
@receiver_application_qualifier.setter
def receiver_application_qualifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "receiver_application_qualifier", value)
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[pulumi.Input[str]]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@sender_application_id.setter
def sender_application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sender_application_id", value)
@property
@pulumi.getter(name="senderApplicationQualifier")
def sender_application_qualifier(self) -> Optional[pulumi.Input[str]]:
"""
The sender application qualifier.
"""
return pulumi.get(self, "sender_application_qualifier")
@sender_application_qualifier.setter
def sender_application_qualifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sender_application_qualifier", value)
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[pulumi.Input[str]]:
"""
The target namespace on which these envelope settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@target_namespace.setter
def target_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_namespace", value)
@pulumi.input_type
class EdifactEnvelopeSettingsArgs:
def __init__(__self__, *,
application_reference_id: Optional[pulumi.Input[str]] = None,
apply_delimiter_string_advice: Optional[pulumi.Input[bool]] = None,
communication_agreement_id: Optional[pulumi.Input[str]] = None,
create_grouping_segments: Optional[pulumi.Input[bool]] = None,
enable_default_group_headers: Optional[pulumi.Input[bool]] = None,
functional_group_id: Optional[pulumi.Input[str]] = None,
group_application_password: Optional[pulumi.Input[str]] = None,
group_application_receiver_id: Optional[pulumi.Input[str]] = None,
group_application_receiver_qualifier: Optional[pulumi.Input[str]] = None,
group_application_sender_id: Optional[pulumi.Input[str]] = None,
group_application_sender_qualifier: Optional[pulumi.Input[str]] = None,
group_association_assigned_code: Optional[pulumi.Input[str]] = None,
group_control_number_lower_bound: Optional[pulumi.Input[int]] = None,
group_control_number_prefix: Optional[pulumi.Input[str]] = None,
group_control_number_suffix: Optional[pulumi.Input[str]] = None,
group_control_number_upper_bound: Optional[pulumi.Input[int]] = None,
group_controlling_agency_code: Optional[pulumi.Input[str]] = None,
group_message_release: Optional[pulumi.Input[str]] = None,
group_message_version: Optional[pulumi.Input[str]] = None,
interchange_control_number_lower_bound: Optional[pulumi.Input[int]] = None,
interchange_control_number_prefix: Optional[pulumi.Input[str]] = None,
interchange_control_number_suffix: Optional[pulumi.Input[str]] = None,
interchange_control_number_upper_bound: Optional[pulumi.Input[int]] = None,
is_test_interchange: Optional[pulumi.Input[bool]] = None,
overwrite_existing_transaction_set_control_number: Optional[pulumi.Input[bool]] = None,
processing_priority_code: Optional[pulumi.Input[str]] = None,
receiver_internal_identification: Optional[pulumi.Input[str]] = None,
receiver_internal_sub_identification: Optional[pulumi.Input[str]] = None,
receiver_reverse_routing_address: Optional[pulumi.Input[str]] = None,
recipient_reference_password_qualifier: Optional[pulumi.Input[str]] = None,
recipient_reference_password_value: Optional[pulumi.Input[str]] = None,
rollover_group_control_number: Optional[pulumi.Input[bool]] = None,
rollover_interchange_control_number: Optional[pulumi.Input[bool]] = None,
rollover_transaction_set_control_number: Optional[pulumi.Input[bool]] = None,
| |
field=ofdef.create_oxm(ofdef.NXM_NX_REG5, netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG6, 0xffffffff)
),
ofdef.nx_action_resubmit(
in_port=ofdef.OFPP_IN_PORT & 0xffff,
table=l2output
)
],
data = packet._tobytes()
)
]
)
async def _send_buffer_packet_out(netid,macaddress,ipaddress,srcmacaddress,packet,bid = ofdef.OFP_NO_BUFFER):
await self.execute_commands(conn,
[
ofdef.ofp_packet_out(
buffer_id = bid,
in_port = ofdef.OFPP_CONTROLLER,
actions = [
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.NXM_NX_REG5,netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG6, 0xffffffff)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC,srcmacaddress)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,macaddress)
),
ofdef.ofp_action(
type = ofdef.OFPAT_DEC_NW_TTL
),
ofdef.nx_action_resubmit(
in_port = ofdef.OFPP_IN_PORT & 0xffff,
table = l2output
)
],
data = packet._tobytes() if bid == ofdef.OFP_NO_BUFFER else b''
)
]
)
async def _add_host_flow(netid,macaddress,ipaddress,srcmaddress):
await self.execute_commands(conn,
[
ofdef.ofp_flow_mod(
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id=ofdef.OFP_NO_BUFFER,
hard_timeout = self._parent.arp_complete_timeout,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ipaddress)
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions = [
ofdef.ofp_action_set_field(
                                        field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, srcmacaddress)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,macaddress)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
),
ofdef.ofp_flow_mod(
cookie = 0x1,
cookie_mask=0xffffffffffffffff,
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY,
buffer_id=ofdef.OFP_NO_BUFFER,
idle_timeout=self._parent.arp_complete_timeout * 2,
flags = ofdef.OFPFF_SEND_FLOW_REM,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ipaddress)
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
                                        field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, srcmacaddress)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, macaddress)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
),
ofdef.ofp_action_output(
port = ofdef.OFPP_CONTROLLER,
max_len = 60
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
]
)
async def _add_static_routes_flow(from_net_id,cidr,to_net_id,smac,dmac):
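            # Longest-prefix match is emulated via flow priorities: the prefix length is
            # added to the default priority, so a more specific route always wins over an
            # overlapping, less specific one in the same table.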
network,prefix = parse_ip4_network(cidr)
await self.execute_commands(conn,[
ofdef.ofp_flow_mod(
table_id=l3router,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + prefix,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG4, from_net_id),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W,
network,
get_netmask(prefix))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG5, to_net_id)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, smac)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, dmac)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
])
async def _add_static_host_flow(ipaddress, dmac, netid, smac):
await self.execute_commands(conn, [
ofdef.ofp_flow_mod(
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ip4_addr(ipaddress))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, smac)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, dmac)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
])
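        # Main event loop: dispatch on the four matchers (packet-in, ARP reply,
        # ARP flow refresh request, ARP flow removal) until the routine terminates.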
while True:
ev, m = await M_(packetin_matcher, arpreply_matcher,arpflow_request_matcher,arpflow_remove_matcher)
msg = ev.message
try:
if m is packetin_matcher:
outnetworkid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.NXM_NX_REG5))
ippacket = ethernet_l4.create(msg.data)
ct = time.time()
if (outnetworkid,ippacket.ip_dst) in self._arp_cache:
status,_,_,mac,_ = self._arp_cache[(outnetworkid,ippacket.ip_dst)]
                        # status 2: the cached MAC is confirmed by a real ARP reply
if status == 2:
info = self._getinterfaceinfobynetid(outnetworkid)
if info:
smac,ip,_= info
self.subroutine(_send_buffer_packet_out(outnetworkid,mac,ip,mac_addr(smac),
ippacket,msg.buffer_id))
continue
if (outnetworkid,ippacket.ip_dst) in self._packet_buffer:
                        # drop buffered packets that have already timed out
nv = [(p,bid,t) for p,bid,t in self._packet_buffer[(outnetworkid,ippacket.ip_dst)]
if ct < t]
nv.append((ippacket,msg.buffer_id,ct + self._parent.buffer_packet_timeout))
self._packet_buffer[(outnetworkid,ippacket.ip_dst)] = nv
else:
self._packet_buffer[(outnetworkid,ippacket.ip_dst)] = \
[(ippacket,msg.buffer_id,ct + self._parent.buffer_packet_timeout)]
e = ARPRequest(self._connection,ipaddress=ippacket.ip_dst,
logicalnetworkid=outnetworkid,isstatic=False,
cidr=ip4_addr.formatter(ippacket.ip_dst))
self.subroutine(self.wait_for_send(e), False)
elif m is arpflow_request_matcher:
outnetworkid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.NXM_NX_REG5))
#ipaddress = ofdef.get_oxm(msg.match.oxm_fields,ofdef.OXM_OF_IPV4_DST)
ippacket = ethernet_l4.create(msg.data)
ipaddress = ippacket.ip_dst
ct = time.time()
                    if (outnetworkid, ipaddress) in self._arp_cache:
status,timeout,isstatic,mac,cidr = self._arp_cache[(outnetworkid,ipaddress)]
if status == 2:
                            # mark the entry as stale; the next refresh cycle will send an ARP request
entry = (3,timeout,isstatic,mac,cidr)
self._arp_cache[(outnetworkid,ipaddress)] = entry
elif m is arpflow_remove_matcher:
nid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.NXM_NX_REG5))
ip_address = ip4_addr(ip4_addr_bytes.formatter(
ofdef.get_oxm(msg.match.oxm_fields, ofdef.OXM_OF_IPV4_DST)))
                    if (nid, ip_address) in self._arp_cache:
_, _, isstatic, _, _ = self._arp_cache[(nid,ip_address)]
                        # never delete a static ARP entry
if not isstatic:
del self._arp_cache[(nid,ip_address)]
if (nid,ip_address) in self._packet_buffer:
del self._packet_buffer[(nid,ip_address)]
elif m is arpreply_matcher:
netid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields,ofdef.NXM_NX_REG5))
arp_reply_packet = ethernet_l7.create(msg.data)
reply_ipaddress = arp_reply_packet.arp_spa
reply_macaddress = arp_reply_packet.arp_sha
dst_macaddress = arp_reply_packet.dl_dst
if (netid,reply_ipaddress) in self._arp_cache:
status, timeout, isstatic,_,cidr = self._arp_cache[(netid,reply_ipaddress)]
ct = time.time()
if isstatic:
entry = (2,ct + self._parent.static_host_arp_refresh_interval,
isstatic,reply_macaddress,cidr)
self._arp_cache[(netid,reply_ipaddress)] = entry
# add static routes in l3router
network_relate_router = self._getallinterfaceinfobynetid(netid)
for k, v in network_relate_router.items():
for smac, nid in v:
self.subroutine(_add_static_routes_flow(nid, cidr, netid,
mac_addr(smac), reply_macaddress))
if netid == nid:
self.subroutine(_add_static_host_flow(ip4_addr.formatter(reply_ipaddress),
reply_macaddress, nid, mac_addr(smac)))
else:
# this is the first arp reply
if status == 1 or status == 3:
                                # After the flow's hard_timeout expires, packets reach the controller
                                # again; a packet arriving in that window triggers a unicast ARP request.
                                # Ideally the cache timeout t satisfies
                                # 1 * self._parent.arp_complete_timeout < t < 2 * self._parent.arp_complete_timeout
self._arp_cache[(netid,reply_ipaddress)] = (2,
ct + self._parent.arp_complete_timeout + 20,False,reply_macaddress,cidr)
                                # flush the packet buffer: packet-out requests were waiting for this ARP reply
if (netid,reply_ipaddress) in self._packet_buffer:
for packet,bid, t in self._packet_buffer[(netid,reply_ipaddress)]:
self.subroutine(_send_buffer_packet_out(netid,reply_macaddress,
reply_ipaddress,dst_macaddress,packet,bid))
del self._packet_buffer[(netid,reply_ipaddress)]
                                # add a flow for this host in l3output
                                # asyncStart changed from False to True so buffered packets are sent before the flow is added
self.subroutine(_add_host_flow(netid,reply_macaddress,reply_ipaddress,dst_macaddress))
except Exception:
                self._logger.warning("Error while handling a router packet-in message; ignored", exc_info=True)
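    # Summary of the bookkeeping used by the loop above (inferred from this handler):
    #   self._arp_cache maps (network_id, ip) to (status, timeout, isstatic, mac, cidr),
    #   where status 1 = resolution in progress, 2 = resolved, 3 = resolved but stale
    #   (the next refresh cycle re-sends an ARP request);
    #   self._packet_buffer maps (network_id, ip) to [(packet, buffer_id, expire_time), ...].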
async def _update_handler(self):
dataobjectchange = iop.DataObjectChanged.createMatcher(None, None, self._connection)
while True:
ev = await dataobjectchange
self._lastlogicalport, self._lastphyport, self._lastlogicalnet, self._lastphynet = ev.current
self._update_walk()
self.updateobjects((p for p,_ in self._lastlogicalport))
def _update_walk(self):
logicalportkeys = [p.getkey() for p, _ in self._lastlogicalport]
logicalnetkeys = [n.getkey() for n, _ in self._lastlogicalnet]
phyportkeys = [p.getkey() for p,_ in self._lastphyport]
phynetkeys = [n.getkey() for n,_ in self._lastphynet]
dvrforwardinfokeys = [DVRouterForwardSet.default_key()]
        self._initialkeys = logicalportkeys + logicalnetkeys + phyportkeys + phynetkeys + dvrforwardinfokeys
        self._original_keys = logicalportkeys + logicalnetkeys + phyportkeys + phynetkeys + dvrforwardinfokeys
self._walkerdict = dict(itertools.chain(((p, self._walk_lgport) for p in logicalportkeys),
((n, self._walk_lgnet) for n in logicalnetkeys),
((n, self._walk_phynet) for n in phynetkeys),
((f, self._walk_dvrforwardinfo) for f in dvrforwardinfokeys),
((p, self._walk_phyport) for p in phyportkeys)))
self.subroutine(self.restart_walk(), False)
def _walk_dvrforwardinfo(self,key,value,walk,save):
save(key)
for weakref in value.set.dataset():
try:
weakobj = walk(weakref.getkey())
except KeyError:
pass
else:
save(weakobj.getkey())
def _walk_lgport(self, key, value, walk, save):
if value is None:
return
save(key)
def _walk_lgnet(self, key, value, walk, save):
if value is None:
return
save(key)
lgnetmapkey = LogicalNetworkMap.default_key(LogicalNetwork._getIndices(key)[1][0])
with suppress(WalkKeyNotRetrieved):
lgnetmap = walk(lgnetmapkey)
save(lgnetmap.getkey())
if self._parent.prepush:
for lgport_weak in lgnetmap.ports.dataset():
with suppress(WalkKeyNotRetrieved):
lgport = walk(lgport_weak.getkey())
save(lgport.getkey())
for subnet_weak in lgnetmap.subnets.dataset():
with suppress(WalkKeyNotRetrieved):
subnetobj = walk(subnet_weak.getkey())
save(subnetobj.getkey())
if hasattr(subnetobj, "router"):
routerport = walk(subnetobj.router.getkey())
save(routerport.getkey())
if hasattr(routerport, "router"):
router = walk(routerport.router.getkey())
save(router.getkey())
for weakobj in router.interfaces.dataset():
routerport_weakkey = weakobj.getkey()
                                    # we walked in from this key, so skip it
if routerport_weakkey != routerport.getkey():
with suppress(WalkKeyNotRetrieved):
weakrouterport = walk(routerport_weakkey)
save(routerport_weakkey)
if hasattr(weakrouterport, "subnet"):
weaksubnet = walk(weakrouterport.subnet.getkey())
save(weaksubnet.getkey())
if hasattr(weaksubnet, "network"):
logicalnetwork = walk(weaksubnet.network.getkey())
save(logicalnetwork.getkey())
def _walk_phyport(self, key, value, walk, save):
if value is None:
return
save(key)
def _walk_phynet(self,key,value,walk,save):
if value is None:
return
save(key)
def reset_initialkeys(self,keys,values):
subnetkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(SubNet)]
routerportkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(RouterPort)]
routerkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(VRouter)]
forwardinfokeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(DVRouterForwardInfoRef)]
self._initialkeys = tuple(itertools.chain(self._original_keys,subnetkeys,
routerportkeys,routerkeys,forwardinfokeys))
async def updateflow(self, connection, addvalues, removevalues, updatedvalues):
try:
datapath_id = connection.openflow_datapathid
ofdef = connection.openflowdef
vhost = connection.protocol.vhost
lastsubnetinfo = self._lastsubnetinfo
lastlgportinfo = self._lastlgportinfo
lastrouterstoreinterfaceinfo = self._lastrouterstoreinterfacenetinfo
lastnetworkrouterinfo = self._lastnetworkrouterinfo
lastnetworkroutertableinfo = self._lastnetworkroutertableinfo
            lastnetworkstaticroutesinfo = self._lastnetworkstaticroutesinfo
            laststaticroutes = self._laststaticroutes
laststoreinfo = self._laststoreinfo
lastnetworkforwardinfo = self._lastnetworkforwardinfo
lastexternallgportinfo = self._lastexternallgportinfo
allobjects = set(o for o in self._savedresult if o is not None and not o.isdeleted())
dvrforwardinfo = dict(((f.from_pynet,f.to_pynet),f.info) for f in allobjects
if f.isinstance(DVRouterForwardInfoRef))
self._lastdvrforwardinfo = dvrforwardinfo
currentphynetinfo = dict((n,n.id) for n,_ in self._lastphynet if n in allobjects)
# phyport : phynet = 1:1, so we use phynet as key
currentphyportinfo = dict((p.physicalnetwork, (p,id)) for p, id in self._lastphyport if p in allobjects
and p.physicalnetwork in currentphynetinfo)
currentlognetinfo = {}
lognetinfo = dict((n,id) for n,id in self._lastlogicalnet if n in allobjects)
for n,id in lognetinfo.items():
                # If this logical network has a physical port, use the physical port's MAC
                # as the base for deriving the MAC the router uses when sending packets;
                # otherwise fall back to the internal router MAC (inroutermac).
if n.physicalnetwork in currentphyportinfo:
_,phyportid = currentphyportinfo[n.physicalnetwork]
openflow_port = await call_api(self, "openflowportmanager", "waitportbyno",
{"datapathid": datapath_id,
"vhost": vhost,
"portno": phyportid})
portmac = openflow_port.hw_addr
                    # derive the router's outbound MAC from the physical port MAC
outmac = [s ^ m for s, m in zip(portmac, mac_addr(self._parent.outroutermacmask))]
currentlognetinfo[n] = (id,mac_addr.formatter(outmac),phyportid)
else:
currentlognetinfo[n] = (id,self._parent.inroutermac,None)
currentlgportinfo = dict((p,(p.ip_address,p.mac_address,currentlognetinfo[p.network][0],p.network.id))
for p,id in self._lastlogicalport if p in allobjects
and hasattr(p,"ip_address")
and hasattr(p,"mac_address")
and p.network in currentlognetinfo)
currentexternallgportinfo = dict((p,(p.ip_address,p.mac_address,currentlognetinfo[p.network][0],
currentlognetinfo[p.network][1]))
for p in allobjects if p.isinstance(LogicalPort)
and hasattr(p,"ip_address")
and hasattr(p,"mac_address")
and p.network in currentlognetinfo
and p not in currentlgportinfo)
self._lastlgportinfo = currentlgportinfo
self._lastexternallgportinfo = currentexternallgportinfo
subnet_to_routerport = dict((p.subnet,p) for p in allobjects if p.isinstance(RouterPort))
router_to_routerport = dict((p.router,p) for p in allobjects if p.isinstance(RouterPort))
            routerport_to_subnet = dict((p, p.subnet) for p in allobjects if p.isinstance(RouterPort))
= "lix_3x_holder_c",
tip_type = "opentrons_96_tiprack_300ul",
flow_rate_aspirate = 50, flow_rate_dispense = 50,
bottom_clearance = 1
):
""" ot2_layout should be a dictionary:
{"plates" : "1,2",
"holders" : "7,8",
"tips" : "9,10"}
"""
print("Processing sample list(s) ...")
slist,transfer_list,bdict = process_sample_lists(xls_fns, b_lim=b_lim)
if ldict is None:
print("Reading bar/QR codes, this might take a while ...")
ldict = read_OT2_layout(ot2_layout["plates"], ot2_layout["holders"])
print(ldict)
holders = {}
holder_qr_codes = chain(ldict['holders'].keys())
print(f"{len(ldict['holders'])} holders are available.")
for st in slist:
if not st[0] in holders.keys():
try:
holders[st[0]]= next(holder_qr_codes)
except StopIteration:
print("Error: Not enough sample holders for transfer.")
raise
print(f"{len(holders)} holders are needed.")
fn = f"{run_name}_protocol.py"
print(f"Generating protocol ({fn}) ...")
protocol = ["metadata = {'protocolName': 'sample transfer',\n",
" 'author': 'LiX',\n",
" 'description': 'auto-generated',\n",
" 'apiLevel': '2.3'\n",
" }\n",
"\n",
"def run(ctx):\n",]
for slot in ot2_layout["plates"].split(","):
protocol.append(f" lbw{slot} = ctx.load_labware('{plate_type}', '{slot}')\n")
for slot in ot2_layout["holders"].split(","):
protocol.append(f" lbw{slot} = ctx.load_labware('{holder_type}', '{slot}')\n")
tips = []
for slot in ot2_layout["tips"].split(","):
protocol.append(f" lbw{slot} = ctx.load_labware('{tip_type}', '{slot}')\n")
tips.append(f"lbw{slot}")
protocol.append(f" pipet = ctx.load_instrument('p300_single', 'left', tip_racks=[{','.join(tips)}])\n")
protocol.append(f" pipet.well_bottom_clearance.aspirate = {bottom_clearance}\n")
protocol.append(f" pipet.flow_rate.aspirate = {flow_rate_aspirate}\n")
protocol.append(f" pipet.flow_rate.dispense = {flow_rate_dispense}\n")
for st in transfer_list:
src,sw,dest,dw,vol = st
if dest in holders.keys():
dest = holders[dest]
if src in ldict["plates"].keys():
sname = f"lbw{ldict['plates'][src]['slot']}.well('{sw}')"
elif src in holders.values():
sname = f"lbw{ldict['holders'][src]['slot']}.well('{ldict['holders'][src]['holder']}{sw}')"
else:
raise Exception(f"Unknown labware encountered: {src}")
if dest in ldict["plates"].keys():
dname = f"lbw{ldict['plates'][dest]['slot']}.well('{dw}')"
elif dest in holders.values():
dname = f"lbw{ldict['holders'][dest]['slot']}.well('{ldict['holders'][dest]['holder']}{dw}')"
else:
raise Exception(f"Unknown labware encountered: {dest}")
protocol.append(f" pipet.transfer({vol}, {sname}, {dname})\n")
    with open(fn, "w") as fd:
        fd.writelines(protocol)
fn = f"{run_name}.xlsx"
print(f"Writing measurement sequence to {fn}.")
generate_measurement_spreadsheet(fn, slist, holders, bdict)
print("Done.")
def generate_docs2(ot2_layout, xls_fns,
run_name="test",
b_lim=4,
plate_types = ["corning_96_wellplate_360ul_flat",
"biorad_96_wellplate_200ul_pcr"],
holder_types = ["lix_3x_holder_c"],
tip_types = ["opentrons_96_tiprack_300ul",
"opentrons_96_tiprack_20ul"],
pipets = {"left": {"type": "p300_single", "tip_size": "300ul", "maxV": 300},
"right": {"type": "p20_single", "tip_size": "20ul", "maxV": 20}},
flow_rate_aspirate = 0.3, flow_rate_dispense = 0.3 # fraction of the maxV
):
""" ot2_layout should be a dictionary, slot #: labware type
{"1" : "lix_3x_holder_c",
"2" : "corning_96_wellplate_360ul_flat",
"3" : "opentrons_96_tiprack_300ul"
"4" : "lix_3x_holder_c",
"6" : "opentrons_96_tiprack_20ul"}
"""
print("Processing sample list(s) ...")
slist,transfer_list,bdict = process_sample_lists(xls_fns, b_lim=b_lim)
h_slots = [k for k,l in ot2_layout.items() if l in holder_types]
p_slots = [k for k,l in ot2_layout.items() if l in plate_types]
t_slots = [k for k,l in ot2_layout.items() if l in tip_types]
print("Reading bar/QR codes, this might take a while ...")
ldict = read_OT2_layout(",".join(p_slots), ",".join(h_slots))
print(ldict)
holders = {}
holder_qr_codes = chain(ldict['holders'].keys())
print(f"{len(ldict['holders'])} holders are available.")
for st in slist:
if not st[0] in holders.keys():
try:
holders[st[0]]= next(holder_qr_codes)
except StopIteration:
print("Error: Not enough sample holders for transfer.")
raise
print(f"{len(holders)} holders are needed.")
fn = f"{run_name}_protocol.py"
print(f"Generating protocol ({fn}) ...")
protocol = ["metadata = {'protocolName': 'sample transfer',\n",
" 'author': 'LiX',\n",
" 'description': 'auto-generated',\n",
" 'apiLevel': '2.3'\n",
" }\n",
"\n",
"def run(ctx):\n",]
for slot in p_slots+h_slots+t_slots:
protocol.append(f" lbw{slot} = ctx.load_labware('{ot2_layout[slot]}', '{slot}')\n")
for k,p in pipets.items():
tips = ','.join([f"lbw{s}" for s in t_slots if p["tip_size"] in ot2_layout[s]])
protocol.append(f" pipet_{k} = ctx.load_instrument('p300_single', 'left', tip_racks=[{tips}])\n")
protocol.append(f" pipet_{k}.flow_rate.aspirate = {flow_rate_aspirate*p['maxV']}\n")
protocol.append(f" pipet_{k}.flow_rate.dispense = {flow_rate_dispense*p['maxV']}\n")
# sorted by maxV, low to high
pvdict = {pipets[pn]["maxV"]:pn for pn in pipets.keys()}
pvdict = {k:pvdict[k] for k in sorted(pvdict.keys())}
vlist = list(pvdict.keys())
    def select_pipet(v):
        # pick the smallest pipet whose maximum volume still covers the requested transfer
        for maxv in vlist:
            if v <= maxv:
                return f"pipet_{pvdict[maxv]}"
        raise Exception("requested transfer volume exceeds tip maximum")
for st in transfer_list:
src,sw,dest,dw,vol = st
if dest in holders.keys():
dest = holders[dest]
if src in ldict["plates"].keys():
sname = f"lbw{ldict['plates'][src]['slot']}.well('{sw}')"
elif src in holders.values():
sname = f"lbw{ldict['holders'][src]['slot']}.well('{ldict['holders'][src]['holder']}{sw}')"
else:
raise Exception(f"Unknown labware encountered: {src}")
if dest in ldict["plates"].keys():
dname = f"lbw{ldict['plates'][dest]['slot']}.well('{dw}')"
elif dest in holders.values():
dname = f"lbw{ldict['holders'][dest]['slot']}.well('{ldict['holders'][dest]['holder']}{dw}')"
else:
raise Exception(f"Unknown labware encountered: {dest}")
protocol.append(f" {select_pipet(vol)}.transfer({vol}, {sname}, {dname})\n")
    with open(fn, "w") as fd:
        fd.writelines(protocol)
fn = f"{run_name}.xlsx"
print(f"Writing measurement sequence to {fn}.")
generate_measurement_spreadsheet(fn, slist, holders, bdict)
print("Done.")
def validatePlateSampleListGUI():
propTx = ipywidgets.Text(value='',
layout=ipywidgets.Layout(width='20%'),
description='Proposal:')
safTx = ipywidgets.Text(value='',
layout=ipywidgets.Layout(width='20%'),
description='SAF:')
plateTx = ipywidgets.Text(value='',
layout=ipywidgets.Layout(width='16%'),
description='plate ID:')
fnFU = ipywidgets.FileUpload(accept='.xlsx', multiple=False,
description="sample list upload",
layout=ipywidgets.Layout(width='30%'))
btnValidate = ipywidgets.Button(description='Validate',
layout=ipywidgets.Layout(width='25%'),
style = {'description_width': 'initial'})
outTxt = ipywidgets.Textarea(layout=ipywidgets.Layout(width='55%'))
hbox1 = ipywidgets.HBox([propTx, safTx, plateTx])
hbox2 = ipywidgets.HBox([fnFU, btnValidate])
vbox = ipywidgets.VBox([hbox1, hbox2, outTxt])
def on_validate_clicked(b):
flist = list(fnFU.value.keys())
if len(flist)==0:
outTxt.value = "upload the sample list spreadsheet first ..."
return
try:
msg = validate_sample_list(flist[0], generate_barcode=True,
proposal_id=propTx.value,
SAF_id=safTx.value,
plate_id=plateTx.value)
outTxt.value = "\n".join(msg)
except Exception as e:
            s = getattr(e, 'message', str(e))
            outTxt.value = "Error: " + s
display(vbox)
btnValidate.on_click(on_validate_clicked)
# adapted from 04-sample.py
def check_sample_name(sample_name, sub_dir=None,
check_for_duplicate=True, check_dir=False,
data_path="./" # global variable in 04-sample.py
):
if len(sample_name)>42: # file name length limit for Pilatus detectors
print("Error: the sample name is too long:", len(sample_name))
return False
    l1 = re.findall(r'[^:._A-Za-z0-9\-]', sample_name)
if len(l1)>0:
print("Error: the file name contain invalid characters: ", l1)
return False
if check_for_duplicate:
f_path = data_path
if sub_dir is not None:
f_path += ('/'+sub_dir+'/')
#if DET_replace_data_path:
#f_path = data_path.replace(default_data_path_root, substitute_data_path_root)
if PilatusFilePlugin.froot == data_file_path.ramdisk:
f_path = data_path.replace(data_file_path.gpfs.value, data_file_path.ramdisk.value)
if check_dir:
fl = glob.glob(f_path+sample_name)
else:
fl = glob.glob(f_path+sample_name+"_000*")
if len(fl)>0:
print(f"Error: name already exists: {sample_name} at {f_path}")
return False
return True
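# e.g. check_sample_name("lysozyme_1mgml", check_for_duplicate=False) returns True,
# while a name containing a space or '%' fails the character check above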
# adapted from startup_solution.py
def parseSpreadsheet(infilename, sheet_name=0, strFields=[]):
""" dropna removes empty rows
"""
converter = {col: str for col in strFields}
DataFrame = pd.read_excel(infilename, sheet_name=sheet_name,
converters=converter, engine="openpyxl")
DataFrame.dropna(axis=0, how='all', inplace=True)
return DataFrame.to_dict()
def checkHolderSpreadsheet(spreadSheet, sheet_name=0,
check_for_duplicate=False, configName=None,
requiredFields=['sampleName', 'holderName', 'position'],
optionalFields=['volume', 'exposure', 'bufferName'],
autofillFields=['holderName', 'volume', 'exposure'],
strFields=['sampleName', 'bufferName', 'holderName'],
numFields=['volume', 'position', 'exposure'],
min_load_volume=50):
d = parseSpreadsheet(spreadSheet, sheet_name, strFields)
tf = set(requiredFields) - set(d.keys())
if len(tf)>0:
raise Exception(f"missing fields in spreadsheet: {list(tf)}")
autofillSpreadsheet(d, fields=autofillFields)
allFields = list(set(requiredFields+optionalFields).intersection(d.keys()))
for f in list(set(allFields).intersection(strFields)):
for e in d[f].values():
if not isinstance(e, str):
if not np.isnan(e):
raise Exception(f"non-string value in {f}: {e}")
for f in list(set(allFields).intersection(numFields)):
for e in d[f].values():
if not (isinstance(e, int) or isinstance(e, float)):
raise Exception(f"non-numerical value in {f}: {e}")
if e<=0 or np.isnan(e):
raise Exception(f"invalid value in {f}: {e}, positive value required.")
if 'volume' in allFields:
if np.min(list(d['volume'].values()))<min_load_volume:
raise Exception(f"load volume must be greater than {min_load_volume} ul!")
# max position number is 18
sp = np.asarray(list(d['position'].values()), dtype=int)
if sp.max()>18:
raise Exception(f"invalid sample positionL {sp.max()}.")
if sp.min()<1:
raise Exception(f"invalid sample positionL {sp.min()}.")
sdict = {}
for (hn,pos,sn,bn) in zip(d['holderName'].values(),
d['position'].values(),
d['sampleName'].values(),
d['bufferName'].values()):
if not hn in sdict.keys():
sdict[hn] = {}
if str(sn)=='nan':
continue
if pos in sdict[hn].keys():
raise Exception(f"duplicate sample position {pos} in {hn}")
if not check_sample_name(sn, check_for_duplicate=False):
raise Exception(f"invalid sample name: {sn} in holder {hn}")
sdict[hn][pos] = {'sample': sn}
if str(bn)!='nan':
sdict[hn][pos]['buffer'] = bn
for hn,sd in sdict.items():
plist = list(sd.keys())
slist = [t['sample'] for t in sd.values()]
for pos,t in sd.items():
if slist.count(t['sample'])>1:
raise Exception(f"duplicate sample name {t['sample']} in {hn}")
if not 'buffer' in t.keys():
continue
if not t['buffer'] in slist:
raise Exception(f"{t['buffer']} is not a valid buffer in {hn}")
bpos = plist[slist.index(t['buffer'])]
if (bpos-pos)%2:
raise Exception(f"{t['sample']} and its buffer not in the same row in holder {hn}")
return sdict
def autofillSpreadsheet(d, fields=['holderName', 'volume']):
""" if the filed in one of the autofill_fileds is empty, duplicate the value from the previous row
"""
col_names = list(d.keys())
n_rows = len(d[col_names[0]])
if n_rows<=1:
return
for ff in fields:
if ff not in d.keys():
#print(f"invalid column name: {ff}")
continue
idx = list(d[ff].keys())
for i in range(n_rows-1):
if str(d[ff][idx[i+1]])=='nan':
d[ff][idx[i+1]] = d[ff][idx[i]]
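# Fill-down behaviour on hypothetical data: given
#     d = {'holderName': {0: 'h1', 1: nan, 2: 'h2'}, 'volume': {0: 50, 1: nan, 2: nan}}
# autofillSpreadsheet(d) leaves
#     d == {'holderName': {0: 'h1', 1: 'h1', 2: 'h2'}, 'volume': {0: 50, 1: 50, 2: 50}}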
def validateHolderSpreadsheet(fn, proposal_id, SAF_id):
# meant to be used by the users to attach to SAF
# limit to 3 sample holders per spreadsheet
# validate sample list on the Holders tab and generate UIDs for each holder
# beamline prints the QR codes and ship the holders to user
print("Checking spreadsheet format ...")
sdict = checkHolderSpreadsheet(fn)
hlist = list(sdict.keys())
if len(hlist)>3:
raise Exception(f"Found {len(hlist)} sample holders. Only 3 are allowed.")
ll = np.asarray([len(h) for h in hlist])
    if (ll>5).any():  # for the purpose of fitting the text on the QR code label
def uitleg_nl(state):
    uitleg = ''
    if state == 'rondpass':
        uitleg = '''
        You, and your opponents, may only open with 12 points; there are 40 points in the deal in total.
        That means it is possible that nobody holds 12 points, which means it is possible that nobody opens.
        When that happens, we call it a pass-out.
        In the case of a pass-out there is no contract, which means the hand cannot be played.
        '''
    if state == '1SA_opening':
        uitleg = '''
        If your point count is between 15 and 17, you always think of an SA opening first.
        SA stands for "sans atout", without trumps.
        Once you have established that your point count fits, you check whether you have an SA distribution.
        An SA distribution is one in which no suit holds more than 5 cards, and none fewer than 2.
        If that is the case, it means you can open 1SA.
        '''
    if state == '1SA_opening-NT':
        uitleg = '''
        If your point count is between 15 and 17, you always think of an SA opening first.
        SA stands for "sans atout", which is French for without trumps.
        Once you have established that your point count fits, you check whether you have an SA distribution.
        An SA distribution is one in which no suit holds more than 5 cards, and none fewer than 2.
        If that is the case, it means you can open 1SA.
        '''
    if state == '2SA_opening':
        uitleg = '''
        If your point count is between 20 and 22, you always think of a 2SA opening first.
        SA stands for "sans atout", which is French for without trumps.
        Once you have established that your point count fits, you check whether you have an SA distribution.
        An SA distribution is one in which no suit holds more than 5 cards, and none fewer than 2.
        If that is the case, it means you can open 2SA.
        '''
    if state == '2Cs_opening':
        uitleg = '''
        A 2♣ opening is game-forcing, which means you are committed to reaching game.
        Game is 3SA, 4♥, 4♠, 5♣ and 5♦.
        If you and your partner reach game, you receive extra points: the game bonus.
        Reaching game takes quite a few points, 25, or 8 certain tricks.
        Being able to open 2♣ means that you and your partner may not stop bidding until
        you have reached game.
        '''
    if state == 'Normal_5card':
        uitleg = '''
        To open, you need at least 12 points.
        Next you must decide in which suit to open;
        your first choice is always the suit holding the most cards.
        With two five-card suits you open the higher one, with two four-card suits the lower one, and a six-card suit of course ranks above your five-card and four-card suits.
        Even if the six-card suit is a minor and the five-card or four-card suit a major.
        '''
    if state == 'Normal_4card':
        uitleg = '''
        To open, you need at least 12 points.
        Next you must decide in which suit to open;
        your first choice is always the suit holding the most cards.
        With two five-card suits you open the higher one, with two four-card suits the lower one.
        However, in the majors, ♥ and ♠, you may only open with a five-card suit.
        '''
    if state == '1Cs_opening':
        uitleg = '''
        To open, you need at least 12 points.
        Next you must decide in which suit to open;
        your first choice is always the suit holding the most cards.
        With two five-card suits you open the higher one, with two four-card suits the lower one.
        However, in the majors, ♥ and ♠, you may only open with a five-card suit.
        If you then still cannot open,
        that is, you hold no four-card minor and no five-card major, the 1♣ bid remains.
        It can be made on as little as a doubleton in clubs, so remember that when your partner bids it.
        '''
    if state == 'preemtif2':
        uitleg = '''
        To open you need at least 12 points; sometimes you do not have them, but you do hold a very long suit.
        Then you may pester your opponents a little by sabotaging their bidding.
        This is called preemptive bidding: bidding on a long suit instead of on points. To make it work you do need a minimum of 6 points.
        By bidding high with few points you take bidding space away from your opponents, hence the pestering.
        With a six-card suit you bid at the 2 level; the longer the suit, the higher you may bid and the more annoying you may be to your opponents.
        Also remember that a preempt is defined by the jump, so preemptive bidding is still possible when an opponent or your partner has already bid.
        '''
    if state == 'preemtif3':
        uitleg = '''
        To open you need at least 12 points; sometimes you do not have them, but you do hold a very long suit.
        Then you may pester your opponents a little by sabotaging their bidding.
        This is called preemptive bidding: bidding on a long suit instead of on points. To make it work you do need a minimum of 6 points.
        By bidding high with few points you take bidding space away from your opponents, hence the pestering.
        Also remember that a preempt is defined by the jump, so preemptive bidding is still possible when an opponent or your partner has already bid.
        With a seven-card suit you bid at the 3 level; the longer the suit, the higher you may bid and the more annoying you may be to your opponents.
        '''
    if state == 'preemtif4':
        uitleg = '''
        To open you need at least 12 points; sometimes you do not have them, but you do hold a very long suit.
        Then you may pester your opponents a little by sabotaging their bidding.
        This is called preemptive bidding: bidding on a long suit instead of on points. To make it work you do need a minimum of 6 points.
        By bidding high with few points you take bidding space away from your opponents, hence the pestering.
        Also remember that a preempt is defined by the jump, so preemptive bidding is still possible when an opponent or your partner has already bid.
        With an eight-card suit you bid at the 4 level; the longer the suit, the higher you may bid and the more annoying you may be to your opponents.
        If you preempt at the 4 level you may already have reached game, namely in the majors. In that case you simply play game.
        '''
    if state == 'open_pass':
        uitleg = '''
        To open you need at least 12 points; sometimes you simply do not have them and, however annoying, you must pass.
        '''
    if state == 'jacoby':
        uitleg = '''
        Your partner bids 1SA, which shows 15-17 points and a balanced hand.
        You also know that your partner's shortest suit is at least a doubleton.
        Eight cards together is called a fit (with a fit you can play in that suit), so a guaranteed 2 plus your 5 is almost a fit.
        With an (almost) fit you are better off playing in that suit than taking the risk of SA;
        that is why Jacoby was invented, and why you may bid it from as few as 0 points,
        on the condition of a five-card suit in one of the majors, ♥ and ♠.
        Because you prefer the opponents to see as few of your points, and thus as few honours, as possible, you would rather have the 1SA opener play the hand.
        Mr. Jacoby devised something for that: if, with this hand, you bid the suit directly below the suit you actually want to bid,
        your partner can then bid the suit you meant. This is obligatory, and the play ends up in the opener's hand.
        '''
    if state == 'jacoby-2sa':
        uitleg = '''
        Your partner bids 2SA, which shows 20-22 points and a balanced hand.
        You also know that your partner's shortest suit is at least a doubleton.
        Eight cards together is called a fit (with a fit you can play in that suit), so a guaranteed 2 plus your 5 is almost a fit.
        With an (almost) fit you are better off playing in that suit than taking the risk of SA;
        that is why Jacoby was invented, and why you may bid it from as few as 0 points,
        on the condition of a five-card suit in one of the majors, ♥ and ♠.
        Because you prefer the opponents to see as few of your points, and thus as few honours, as possible, you would rather have the 2SA opener play the hand.
        Mr. Jacoby devised something for that: if, with this hand, you bid the suit directly below the suit you actually want to bid,
        your partner can then bid the suit you meant. This is obligatory, and the play ends up in the opener's hand.
        '''
    if state == 'stayman':
        uitleg = '''
        Your partner bids 1SA, which shows 15-17 points and a balanced hand.
        You would prefer to play in a major, ♥ or ♠.
        So if you hold a four-card major this is worth investigating,
        and since with 8-9 points you can perfectly well play 2SA, with that point count you can first investigate the majors.
        You bid 2♣ to ask your partner whether he holds a four-card major.
        Remember, though, that this only makes sense if you hold a four-card major yourself.
        '''
    if state == 'stayman-2sa':
        uitleg = '''
        Your partner bids 2SA, which shows 20-22 points and a balanced hand.
        You would prefer to play in a major, ♥ or ♠.
        So if you hold a four-card major this is worth investigating.
        With 5 points you already reach game (because 20 + at least 5 = at least 25), so not many points are needed to investigate this.
        You bid 3♣ to ask your partner whether he holds a four-card major.
        '''
    if state == '1SA-2SA':
        uitleg = '''
        Your partner bids 1SA, which shows 15-17 points and a balanced hand.
        You would prefer to play in a major, ♥ or ♠, but without a four-card major that is not worthwhile.
        You know your partner holds 15-17 points, so with 8 points (8 + 17 = 25) you can still reach game;
        to ask your partner whether he thinks that is still possible you bid 2SA, and with a maximum your partner bids game.
        '''
    if state == '1SA-3SA':
        uitleg = '''
        Your partner bids 1SA, which shows 15-17 points and a balanced hand.
        You would prefer to play in a major, ♥ or ♠, but without a four-card major it is not worth investigating.
        But you hold 10+ points (10 + 15 = 25), so you want to reach game;
        since you already know it will not be a major-suit game, you simply bid the next-best game, 3SA.
        '''
    if state == '1SA-pass':
        uitleg = '''
        Your partner bids 1SA, which shows 15-17 points and a balanced hand.
        If you hold no five-card major and no more than 7 points, 1SA is simply a fine contract.
        '''
    if state == 'answer_to_stayman_colors':
        uitleg = '''
        Your partner bids Stayman, asking whether you hold a major;
        if you hold four or more cards in one of the majors, you must let your partner know.
        '''
    if state == 'answer_to_stayman_nocolors':
        uitleg = '''
        Your partner bids Stayman, asking whether you hold a major;
        if you hold four or more cards in one of the majors, you must let your partner know.
        In this case you do not, and you want to tell your partner that as well; the 2♦ bid exists for this.
        You are now saying that you hold no four-card or longer major.
        '''
    if state == 'answer_to_stayman_multicolor':
        uitleg = '''
        Your partner bids Stayman, asking whether you hold a major;
        if you hold four or more cards in one of the majors, you must let your partner know.
        But what if you hold four or more cards in both majors?
        Then you bid 2♥, after which your partner bids 2SA, 3SA, 3♥ or 4♥.
        If your partner bids SA he/she denies hearts, but by bidding Stayman he/she promised at least a four-card major.
        That means that, in the case of an SA bid, you know your fit is in spades, and you can then bid spades over your partner's SA.
        So even though you do not immediately tell your partner that you also hold four spades, you can still find out this way whether your fit is in hearts or in spades.
        '''
    if state == 'OpStayman':
        uitleg = '''
        Your opponents bid Stayman and are searching for the right contract for what they can play;
        the chance that you and your partner will make a contract is then very small,
        but that does not mean you cannot give your partner information.
        Your opponent has bid Stayman, which is artificial and says nothing about clubs, so this is an ideal moment to safely bid a double, since the 1SA opener may never pass, and the double therefore costs nothing.
        So if you hold good clubs, this is the ideal situation to let your partner know, by doubling.
        '''
    if state == 'OpStaymanPass':
        uitleg = '''
        Your opponents bid Stayman and are searching for the right contract for what they can play;
        the chance that you and your partner will make a contract is then very small,
        but that does not mean you cannot give your partner information.
        Your opponent has bid Stayman, which is artificial and says nothing about clubs, so this is an ideal moment to safely bid a double, since the 1SA opener may never pass, and the double therefore costs nothing.
        So if you hold good clubs, this is the ideal situation to let your partner know, by doubling.
        However, if you double now, there is a good chance your partner will come back with clubs; if that is not the intention, you must
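# The explanations above repeatedly rely on counting high-card points (A=4, K=3, Q=2,
# J=1, 40 in the whole deck) against the 12-point opening threshold. A minimal,
# self-contained sketch of that arithmetic (the hand encoding is an assumption, not
# taken from the tutor code above):
def high_card_points(hand):
    """hand: list of card ranks such as ['A', 'K', '7', ...]; returns the point total."""
    values = {'A': 4, 'K': 3, 'Q': 2, 'J': 1}
    return sum(values.get(rank, 0) for rank in hand)

def may_open(hand):
    # the tutor's basic rule: an opening bid requires at least 12 points
    return high_card_points(hand) >= 12

# e.g. high_card_points(['A', 'K', 'Q', 'J', '9', '5', '2']) == 10, so may_open(...) is False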
"""Unit test coverage for the logging."""
from io import BytesIO
import logging
import sys
import pytest
from pydicom.uid import (
ImplicitVRLittleEndian, ExplicitVRLittleEndian, ExplicitVRBigEndian,
DeflatedExplicitVRLittleEndian, JPEGBaseline, JPEGExtended,
JPEGLosslessP14, JPEGLossless, JPEGLSLossless, JPEGLSLossy,
JPEG2000Lossless, JPEG2000, JPEG2000MultiComponentLossless,
JPEG2000MultiComponent, RLELossless,
generate_uid,
)
from pynetdicom import build_context, evt, AE, build_role, debug_logger
from pynetdicom.acse import ACSE, APPLICATION_CONTEXT_NAME
from pynetdicom.dimse_primitives import C_MOVE, N_EVENT_REPORT, N_GET, N_DELETE
from pynetdicom._handlers import (
doc_handle_echo, doc_handle_find, doc_handle_c_get, doc_handle_move,
doc_handle_store, doc_handle_action, doc_handle_create, doc_handle_delete,
doc_handle_event_report, doc_handle_n_get, doc_handle_set,
doc_handle_async, doc_handle_sop_common, doc_handle_sop_extended,
doc_handle_userid, doc_handle_acse, doc_handle_dimse, doc_handle_data,
doc_handle_pdu, doc_handle_transport, doc_handle_assoc, doc_handle_fsm,
debug_fsm, debug_data
)
from pynetdicom.pdu import (
A_ASSOCIATE_RQ, A_ASSOCIATE_AC,
)
from pynetdicom.pdu_primitives import (
A_ASSOCIATE,
MaximumLengthNotification,
ImplementationClassUIDNotification,
ImplementationVersionNameNotification,
SCP_SCU_RoleSelectionNegotiation,
AsynchronousOperationsWindowNegotiation,
SOPClassExtendedNegotiation,
SOPClassCommonExtendedNegotiation,
UserIdentityNegotiation,
)
from pynetdicom.sop_class import CTImageStorage, VerificationSOPClass
#debug_logger()
REFERENCE_USER_ID = [
(
(1, b'username', None, False),
[
"Authentication Mode: 1 - Username",
"Username: [username]",
"Positive Response Requested: No",
],
),
(
(1, b'username', None, True),
[
"Authentication Mode: 1 - Username",
"Username: [username]",
"Positive Response Requested: Yes",
],
),
(
(2, b'username', b'pass', False),
[
"Authentication Mode: 2 - Username/Password",
"Username: [username]",
"Password: [<PASSWORD>]",
"Positive Response Requested: No",
],
),
(
(2, b'username', b'pass', True),
[
"Authentication Mode: 2 - Username/Password",
"Username: [username]",
"Password: [<PASSWORD>]",
"Positive Response Requested: Yes",
],
),
(
(3, b'KERBEROS', None, False),
[
"Authentication Mode: 3 - Kerberos",
"Kerberos Service Ticket (not dumped) length: 8",
"Positive Response Requested: No",
],
),
(
(3, b'KERBEROS', None, True),
[
"Authentication Mode: 3 - Kerberos",
"Kerberos Service Ticket (not dumped) length: 8",
"Positive Response Requested: Yes",
],
),
(
(4, b'SAML', None, False),
[
"Authentication Mode: 4 - SAML",
"SAML Assertion (not dumped) length: 4",
"Positive Response Requested: No",
],
),
(
(4, b'SAML', None, True),
[
"Authentication Mode: 4 - SAML",
"SAML Assertion (not dumped) length: 4",
"Positive Response Requested: Yes",
],
),
(
(5, b'JSON', None, False),
[
"Authentication Mode: 5 - JSON Web Token",
"JSON Web Token (not dumped) length: 4",
"Positive Response Requested: No",
],
),
(
(5, b'JSON', None, True),
[
"Authentication Mode: 5 - JSON Web Token",
"JSON Web Token (not dumped) length: 4",
"Positive Response Requested: Yes",
],
)
]
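# Each REFERENCE_USER_ID entry pairs the UserIdentityNegotiation inputs
# (user_identity_type, primary_field, secondary_field, positive_response_requested)
# with the log lines the standard handler is expected to emit for them.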
DOC_HANDLERS = [
doc_handle_echo, doc_handle_find, doc_handle_c_get, doc_handle_move,
doc_handle_store, doc_handle_action, doc_handle_create, doc_handle_delete,
doc_handle_event_report, doc_handle_n_get, doc_handle_set,
doc_handle_async, doc_handle_sop_common, doc_handle_sop_extended,
doc_handle_userid, doc_handle_acse, doc_handle_dimse, doc_handle_data,
doc_handle_pdu, doc_handle_transport, doc_handle_assoc, doc_handle_fsm
]
def test_debug_logger():
"""Test __init__.debug_logger()."""
logger = logging.getLogger('pynetdicom')
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], logging.NullHandler)
debug_logger()
handlers = logger.handlers
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], logging.StreamHandler)
debug_logger()
handlers = logger.handlers
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], logging.StreamHandler)
class TestDocHandlers(object):
"""Dummy tests to coverage for handler documentation functions."""
@pytest.mark.parametrize('handler', DOC_HANDLERS)
def test_doc_handlers(self, handler):
handler(None)
class TestStandardDIMSE(object):
def setup(self):
"""Setup each test."""
self.ae = None
def teardown(self):
"""Cleanup after each test"""
if self.ae:
self.ae.shutdown()
def test_send_n_delete_rsp(self):
"""Test the handler for N-DELETE rsp"""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
msg = N_DELETE()
msg.MessageIDBeingRespondedTo = 1
msg.AffectedSOPClassUID = '1.2.3'
msg.AffectedSOPInstanceUID = '1.2.3.4'
msg.Status = 0x0000
assoc.dimse.send_msg(msg, 1)
assoc.release()
scp.shutdown()
def test_send_n_get_rq_multiple_attr(self):
"""Test the handler for N-GET rq with multiple Attribute Identifiers"""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
msg = N_GET()
msg.MessageID = 1
msg.RequestedSOPClassUID = '1.2.3'
msg.RequestedSOPInstanceUID = '1.2.3.4'
msg.AttributeIdentifierList = [(0x0000,0x0010), (0x00080010)]
assoc.dimse.send_msg(msg, 1)
assoc.release()
scp.shutdown()
def test_send_n_event_report_rsp(self):
"""Test the handler for N-EVENT-REPORT rsp with Event Type ID."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
msg = N_EVENT_REPORT()
msg.MessageIDBeingRespondedTo = 1
msg.AffectedSOPClassUID = '1.2.3'
msg.AffectedSOPInstanceUID = '1.2.3.4'
msg.EventTypeID = 1 # US
msg.EventReply = BytesIO(b'\x00\x01') # Dataset
msg.Status = 0x0000
assoc.dimse.send_msg(msg, 1)
assoc.release()
scp.shutdown()
def test_send_c_move_rsp_no_affected_sop(self):
"""Test the handler for C-MOVE rsp with no Affected SOP Class UID."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
msg = C_MOVE()
msg.MessageIDBeingRespondedTo = 1
msg.Status = 0x0000
msg.NumberOfRemainingSuboperations = 0
msg.NumberOfCompletedSuboperations = 0
msg.NumberOfFailedSuboperations = 0
msg.NumberOfWarningSuboperations = 0
assoc.dimse.send_msg(msg, 1)
assoc.release()
scp.shutdown()
class TestStandardLogging(object):
"""Tests for standard logging handlers."""
def setup(self):
"""Setup each test."""
self.ae = None
# A-ASSOCIATE (request)
primitive = A_ASSOCIATE()
primitive.application_context_name = APPLICATION_CONTEXT_NAME
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'ABCDEFGHIJKLMNOP'
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'1234567890123456'
# The TCP/IP address of the source, pynetdicom includes port too
primitive.calling_presentation_address = ('127.127.127.127', 111112)
# The TCP/IP address of the destination, pynetdicom includes port too
primitive.called_presentation_address = ('0.0.0.0', 0)
# Proposed presentation contexts
contexts = [
build_context('1.2.3.4.5.6', JPEGBaseline),
build_context('1.2.840.10008.1.1')
]
for ii, cx in enumerate(contexts):
cx.context_id = ii * 2 + 1
primitive.presentation_context_definition_list = contexts
item = MaximumLengthNotification()
item.maximum_length_received = 0
primitive.user_information.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = generate_uid(entropy_srcs=['lorem'])
primitive.user_information.append(item)
self.associate_rq = primitive
# A-ASSOCIATE (accept)
primitive = A_ASSOCIATE()
primitive.application_context_name = APPLICATION_CONTEXT_NAME
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'ABCDEFGHIJKLMNOP'
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'1234567890123456'
# The TCP/IP address of the source, pynetdicom includes port too
primitive.calling_presentation_address = ('127.127.127.127', 111112)
# The TCP/IP address of the destination, pynetdicom includes port too
primitive.called_presentation_address = ('0.0.0.0', 0)
# Proposed presentation contexts
contexts = [
build_context('1.2.3.4.5.6', JPEGBaseline),
build_context('1.2.840.10008.1.1'),
build_context('1.2.840.10008.1.1'),
build_context('1.2.840.10008.1.1'),
build_context('1.2.840.10008.1.1'),
]
for ii, cx in enumerate(contexts):
cx.context_id = ii * 2 + 1
cx.result = ii
primitive.presentation_context_definition_results_list = contexts
primitive.result = 0x00
item = MaximumLengthNotification()
item.maximum_length_received = 0
primitive.user_information.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = generate_uid(entropy_srcs=['lorem'])
primitive.user_information.append(item)
self.associate_ac = primitive
def teardown(self):
"""Cleanup after each test"""
if self.ae:
self.ae.shutdown()
    def add_impl_name(self, primitive, name=b'A' + b' ' * 15):
"""Add an Implementation Version Name to the A-ASSOCIATE primitive."""
assert len(name) == 16
item = ImplementationVersionNameNotification()
item.implementation_version_name = name
primitive.user_information.append(item)
return primitive
def add_user_identity(self, primitive, id_type, primary, secondary, response):
"""Add User Identity to the A-ASSOCIATE primitive."""
item = UserIdentityNegotiation()
item.user_identity_type = id_type
item.primary_field = primary
item.secondary_field = secondary
item.positive_response_requested = response
primitive.user_information.append(item)
def add_user_identity_rsp(self, primitive):
"""Add User Identity (rsp) to the A-ASSOCIATE primitive."""
item = UserIdentityNegotiation()
item.server_response = b'this is the response'
primitive.user_information.append(item)
def add_async_ops(self, primitive):
"""Add Asynchronous Ops to the A-ASSOCIATE primitive."""
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
primitive.user_information.append(item)
def add_scp_scu_role(self, primitive):
"""Add SCP/SCU Role Selection to the A-ASSOCIATE primitive."""
contexts = [
build_context('1.2.840.10008.1.1'),
build_context('1.2.840.10008.1.2'),
build_context('1.2.840.10008.1.3'),
build_context('1.2.840.10008.1.4'),
]
for ii, cx in enumerate(contexts):
cx.context_id = ii * 2 + 1
primitive.presentation_context_definition_list = contexts
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.840.10008.1.2'
item.scu_role = True
item.scp_role = False
primitive.user_information.append(item)
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.840.10008.1.3'
item.scu_role = False
item.scp_role = True
primitive.user_information.append(item)
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.840.10008.1.4'
item.scu_role = True
item.scp_role = True
primitive.user_information.append(item)
def add_sop_ext(self, primitive):
"""Add SOP Class Extended to the A-ASSOCIATE primitive."""
req = {
'1.2.3.4' : b'\x00\x01',
'1.2.840.10008.1.1' : b'\x00\x01\x02\x03' * 10
}
for uid, data in req.items():
item = SOPClassExtendedNegotiation()
item.sop_class_uid = uid
item.service_class_application_information = data
primitive.user_information.append(item)
def add_sop_common(self, primitive):
"""Add SOP Class Common Extended to the A-ASSOCIATE primitive."""
req = {
'1.2.3.4' : ('1.2.3', []),
'1.2.3.4.5' : ('1.2.3', ['1.2.1', '1.4.3']),
'1.2.840.10008.1.1' : ('1.2.840.10008.4.2', []),
'1.2.840.10008.1.1.1' : ('1.2.840.10008.4.2',
[CTImageStorage, '1.9.1']),
}
for uid, data in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = uid
item.service_class_uid = data[0]
item.related_general_sop_class_identification = data[1]
primitive.user_information.append(item)
# debug_send_associate_rq
def test_send_assoc_rq_minimal(self, caplog):
"""Test standard PDU logging handler with minimal A-ASSOCIATE-RQ."""
with caplog.at_level(logging.DEBUG, logger='pynetdicom'):
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
pdu = A_ASSOCIATE_RQ()
pdu.from_primitive(self.associate_rq)
evt.trigger(
assoc, evt.EVT_PDU_SENT, {'pdu' : pdu}
)
messages = [
"Our Implementation Class UID: 1.2.826.0.1.3680043.8.498"
".10207287587329888519122978685894984263",
"Calling Application Name: ABCDEFGHIJKLMNOP",
"Called Application Name: 1234567890123456",
"Our Max PDU Receive Size: 0",
"Presentation Contexts:",
"Context ID: 1 (Proposed)",
"Abstract Syntax: =1.2.3.4.5.6",
"Proposed SCP/SCU Role: Default",
"Proposed Transfer Syntax:",
"=JPEG Baseline (Process 1)",
"Context ID: 3 (Proposed)",
"Abstract Syntax: =Verification SOP Class",
"Proposed SCP/SCU Role: Default",
"Proposed Transfer Syntaxes:",
"=Implicit VR Little Endian",
"=Explicit VR Little Endian",
"=Explicit VR Big Endian",
"Requested Extended Negotiation: None",
"Requested Common Extended Negotiation: None",
"Requested Asynchronous Operations Window Negotiation: None",
"Requested User Identity Negotiation: None",
]
for msg in messages:
assert msg in caplog.text
assoc.release()
import argparse
from collections import defaultdict
import datetime
import threading
import os
import sys
import time
import pickle
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from concurrent.futures import as_completed
from multiprocessing import Manager
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
from tqdm import tqdm
from dycause_lib.anomaly_detect import anomaly_detect
# loop_granger is the code provided by the authors of the Granger causal interval method
from dycause_lib.Granger_all_code import loop_granger
from dycause_lib.causal_graph_build import get_segment_split
from dycause_lib.causal_graph_build import get_ordered_intervals
from dycause_lib.causal_graph_build import get_overlay_count
from dycause_lib.causal_graph_build import normalize_by_row, normalize_by_column
from dycause_lib.randwalk import randwalk
from dycause_lib.ranknode import ranknode, analyze_root
from dycause_lib.draw_graph import *
from util_funcs.loaddata import load
from util_funcs.draw_graph import draw_weighted_graph
from util_funcs.evaluation_function import prCal, my_acc, pr_stat, print_prk_acc
from util_funcs.format_ouput import format_to_excel
from util_funcs.excel_utils import saveToExcel
def granger_process(
shared_params_dict,
specific_params,
shared_result_dict):
try:
# with open(common_params_filename, 'rb') as f:
# common_params = pickle.load(f)
common_params = shared_params_dict
ret = loop_granger(
common_params['local_data'],
common_params['data_head'],
common_params['dir_output'],
common_params['data_head'][specific_params['x_i']],
common_params['data_head'][specific_params['y_i']],
common_params['significant_thres'],
common_params['method'],
common_params['trip'],
common_params['lag'],
common_params['step'],
common_params['simu_real'],
common_params['max_segment_len'],
common_params['min_segment_len'],
verbose=False,
return_result=True,
)
except Exception as e:
print("Exception occurred at {} -> {}!".format(
specific_params['x_i'], specific_params['y_i']), e)
logging.error("Exception occurred at {} -> {}!".format(
specific_params['x_i'], specific_params['y_i']))
ret = (None, None, None, None, None)
shared_result_dict['{}->{}'.format(specific_params['x_i'], specific_params['y_i'])] = ret
return ret
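# A minimal sketch of how granger_process is meant to be fanned out across processes
# (hypothetical driver code, not part of this module; the shared dicts come from a
# multiprocessing.Manager so that worker results survive the process boundary):
#
#     manager = Manager()
#     shared_params = manager.dict(common_params)
#     shared_results = manager.dict()
#     with ProcessPoolExecutor(max_workers=5) as pool:
#         futures = [pool.submit(granger_process, shared_params,
#                                {'x_i': x, 'y_i': y}, shared_results)
#                    for x in range(n_services) for y in range(n_services) if x != y]
#         for _ in as_completed(futures):
#             pass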
def test_dycause(
# Data params
data_source="real_micro_service",
aggre_delta=1,
start_time=None,
before_length=300,
after_length=300,
# Granger interval based graph construction params
step=50,
significant_thres=0.05,
lag=5, # must satisfy: step > 3 * lag + 1
auto_threshold_ratio=0.8,
runtime_debug=False,
# Root cause analysis params
testrun_round=1,
frontend=14,
max_path_length=None,
mean_method="arithmetic",
true_root_cause=[28],
topk_path=60,
num_sel_node=1,
# Debug params
plot_figures=False,
verbose=True,
max_workers=5,
**kws,
):
"""
Params:
plot_figures: whether plot result figures. Can be a list of figure names, such as ['all-data', 'abnormal-data',
'dycurves', 'aggre-imgs', 'graph']. Can also True for enable all figure plots, False for disable all.
runtime_debug: whether enable runtime debug mode, where loop_granger is always executed.
"""
if runtime_debug:
time_stat_dict = {}
tic = time.time()
if 'disable_print' not in kws or kws['disable_print'] is False:
print("{:#^80}".format(" DyCause "))
dir_output = "dycause/results/" + data_source
os.makedirs(dir_output, exist_ok=True)
if verbose:
print("{:-^80}".format("Data load phase"))
# region Load and preprocess data
data, data_head = load(
os.path.join("data", data_source, "rawdata.xlsx"),
normalize=True,
zero_fill_method='prevlatter',
aggre_delta=aggre_delta,
verbose=verbose,
)
# Plot all data if asked
if (plot_figures is True) or (isinstance(plot_figures, list) and 'all-data' in plot_figures):
draw_alldata(
data,
data_head,
os.path.join(dir_output, "all-data-L{}.png".format(data.shape[0])),
)
# endregion
# region Set start time in data to analyze if not provided
anomaly_score = 'Not calculated'
if start_time is None:
start_time, anomaly_score = anomaly_detect(
data,
weight=1,
mean_interval=50,
anomaly_proportion=0.3,
verbose=verbose,
save_fig=(plot_figures is True) or (isinstance(
plot_figures, list) and 'anomaly-score' in plot_figures),
path_output=dir_output,
)
if verbose:
print(
"{space:^10}{name1:<30}: {}\n"
"{space:^10}{name2:<30}: {}".format(
start_time,
anomaly_score,
space="",
name1="Start time",
name2="Abnormal score",
)
)
    # plot abnormal data of each service if asked
if (plot_figures is True) or (isinstance(plot_figures, list) and 'abnormal-data' in plot_figures):
draw_alldata(
data[start_time - before_length: start_time + after_length, :],
data_head,
os.path.join(
dir_output,
"abnomal-data-plot-S{}-E{}.png".format(
start_time - before_length, start_time + after_length
),
),
)
# endregion
if runtime_debug:
toc = time.time()
time_stat_dict['Load phase'] = toc-tic
tic = toc
    # region Run loop_granger to get all the intervals
if verbose:
print("{:-^80}".format("Granger interval based impact graph construction phase"))
local_length = before_length + after_length
local_data = data[start_time - before_length: start_time + after_length, :]
method = "fast_version_3"
trip = -1
simu_real = "simu"
max_segment_len = before_length + after_length
min_segment_len = step
list_segment_split = get_segment_split(before_length + after_length, step)
local_results_file_path = os.path.join(
dir_output,
"local-results",
"aggregate-{}".format(aggre_delta),
"local_results"
"_start{start}_bef{bef}_aft{aft}_lag{lag}_sig{sig}_step{step}_min{min}_max{max}.pkl".format(
start=start_time,
bef=before_length,
aft=after_length,
lag=lag,
sig=significant_thres,
step=step,
min=min_segment_len,
max=max_segment_len,
),
)
if os.path.exists(local_results_file_path) and not runtime_debug:
if verbose:
print(
"{:^10}".format(
"") + "Loading previous granger interval results:",
os.path.basename(local_results_file_path),
)
with open(local_results_file_path, "rb") as f:
local_results = pickle.load(f)
else:
if verbose:
print(
"{space:^10}{name}:\n"
"{space:^15}bef len :{bef}\n"
"{space:^15}aft len :{aft}\n"
"{space:^15}lag :{lag}\n"
"{space:^15}significant :{sig}\n"
"{space:^15}step :{step}\n"
"{space:^15}min len :{min}\n"
"{space:^15}max len :{max}\n"
"{space:^15}segment split:".format(
space="",
name="Calculating granger intervals",
bef=before_length,
aft=after_length,
lag=lag,
sig=significant_thres,
step=step,
min=min_segment_len,
max=max_segment_len,
),
list_segment_split,
)
local_results = defaultdict(dict)
# region normal single thread version
# for x_i in range(len(data_head)):
# for y_i in range(len(data_head)):
# if x_i == y_i:
# continue
# feature = data_head[x_i]
# target = data_head[y_i]
# (total_time, time_granger, time_adf, array_results_YX,
# array_results_XY) = granger_process(x_i, y_i)
# print('Iter {:2d}->{:2d} '
# 'Total time :{:5.4f} '
# 'Granger time:{:5.4f} '
# 'Adf time :{:5.4f}'.format(x_i, y_i,
# total_time,
# time_granger,
# time_adf),
# end='\r')
# matrics = [array_results_YX, array_results_XY]
# ordered_intervals = get_ordered_intervals(
# matrics, significant_thres, list_segment_split)
# local_results['%s->%s' %
# (x_i, y_i)]['intervals'] = ordered_intervals
# local_results['%s->%s' %
# (x_i, y_i)]['result_YX'] = array_results_YX
# local_results['%s->%s' %
# (x_i, y_i)]['result_XY'] = array_results_XY
# # skip the \r print line
# print('')
# endregion
        # region ThreadPoolExecutor & ProcessPoolExecutor version
total_thread_num = [len(data_head) * (len(data_head) - 1)]
thread_results = [0 for i in range(total_thread_num[0])]
if verbose:
pbar = tqdm(total=total_thread_num[0], ascii=True)
common_params_filename = os.path.join(
dir_output, 'local-results', 'common_params.pkl')
common_params = {
'local_data': local_data,
'data_head': data_head,
'dir_output': dir_output,
'significant_thres': significant_thres,
'method': method,
'trip': trip,
'lag': lag,
'step': step,
'simu_real': simu_real,
'max_segment_len': max_segment_len,
'min_segment_len': min_segment_len
}
manager = Manager()
shared_params_dict = manager.dict()
shared_result_dict = manager.dict()
for key, value in common_params.items():
shared_params_dict[key]=value
# with open(common_params_filename, 'wb') as f:
# pickle.dump(common_params, f)
executor = ProcessPoolExecutor(max_workers=max_workers)
i = 0
futures = []
tic = time.time()
for x_i in range(len(data_head)):
for y_i in range(len(data_head)):
if x_i == y_i:
continue
futures.append(executor.submit(
granger_process,
shared_params_dict,
{'x_i': x_i, 'y_i': y_i},
shared_result_dict
)
)
i = i + 1
future_complete_time = []
if verbose:
for future in as_completed(futures):
pbar.update(1)
future_complete_time.append(time.time()-tic)
pbar.close()
# save_path = os.path.join(dir_output, 'local-results', 'future-complete-time.pkl')
# os.makedirs(os.path.dirname(save_path), exist_ok=True)
# with open(save_path, 'wb') as f:
# pickle.dump(future_complete_time, f)
executor.shutdown(wait=True)
# print('shared_result_dict keys: ', list(shared_result_dict.keys()))
# exit(0)
i = 0
for x_i in range(len(data_head)):
for y_i in range(len(data_head)):
if x_i == y_i:
continue
# (
# total_time,
# time_granger,
# time_adf,
# array_results_YX,
# array_results_XY,
# ) = futures[i].result()
(
total_time,
time_granger,
time_adf,
array_results_YX,
array_results_XY,
) = shared_result_dict['{}->{}'.format(x_i, y_i)]
matrics = [array_results_YX, array_results_XY]
ordered_intervals = get_ordered_intervals(
matrics, significant_thres, list_segment_split
)
local_results["%s->%s" %
(x_i, y_i)]["intervals"] = ordered_intervals
local_results["%s->%s" %
(x_i, y_i)]["result_YX"] = array_results_YX
local_results["%s->%s" %
(x_i, y_i)]["result_XY"] = array_results_XY
i = i + 1
# endregion
if not runtime_debug:
# Only save local results if not in runtime debug mode
os.makedirs(os.path.dirname(
local_results_file_path), exist_ok=True)
with open(local_results_file_path, "wb") as f:
pickle.dump(local_results, f)
# endregion
if runtime_debug:
toc = time.time()
time_stat_dict['granger causal intervals'] = toc - tic
tic = toc
# region Construction impact graph using generated intervals
# Generate dynamic causal curve between two services by overlaying intervals
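    # For instance, assuming half-open intervals, [(0, 4), (2, 6)] over a
    # length-8 window would yield overlay counts [1, 1, 2, 2, 1, 1, 0, 0]:
    # each position counts how many significant Granger-causal intervals
    # cover it, tracing how causal strength varies over time.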
histogram_sum = defaultdict(int)
edge = []
edge_weight = dict()
for x_i in range(len(data_head)):
for y_i in range(len(data_head)):
if y_i == x_i:
continue
            key = "{0}->{1}".format(x_i, y_i)
intervals = local_results[key]["intervals"]
overlay_counts = get_overlay_count(local_length, intervals)
            # whether to plot temporary figures pairwise
if (plot_figures is True) or (isinstance(plot_figures, list) and 'dycurves' in plot_figures):
os.makedirs(os.path.join(
dir_output, "dynamic-causal-curves"), exist_ok=True)
if verbose:
print(
"{:^10}Ploting {:2d}->{:2d}".format("", x_i + 1, y_i + 1), end="\r"
)
draw_overlay_histogram(
overlay_counts,
"{}->{}".format(x_i + 1, y_i + 1),
os.path.join(
dir_output, "dynamic-causal-curves", "{0}-{1}.png".format(
x_i + 1, y_i + 1)
),
)
histogram_sum[key] = sum(overlay_counts)
# skip the \r print line
    if ((plot_figures is True) or (isinstance(plot_figures, list) and 'dycurves' in plot_figures)) and verbose:
print("")
# Make edges from 1 node using comparison and auto-threshold
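    # Auto-threshold: for each source node x, an edge x -> y is kept only if its
    # overlay sum reaches auto_threshold_ratio (e.g. 0.8) of the largest overlay
    # sum emitted by x, so every node contributes only its strongest edges.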
    for x_i in range(len(data_head)):
bar_data = []
for y_i in range(len(data_head)):
key = "{0}->{1}".format(<KEY>
bar_data.append(histogram_sum[key])
# whether plot temporary figure from one node
if (plot_figures is True) or (isinstance(plot_figures, list) and 'aggre-imgs' in plot_figures):
if not os.path.exists(os.path.join(dir_output, "aggre-imgs")):
os.makedirs(os.path.join(dir_output, "aggre-imgs"))
if verbose:
print("{:^10}Ploting aggre imgs {:2d}".format("", x_i + 1),
end="\r")
draw_bar_histogram(
bar_data, auto_threshold_ratio,
"From service {0}".format(x_i + 1),
os.path.join(dir_output, "aggre-imgs",
"{0}.png".format(x_i + 1)),
)
bar_data_thres = np.max(bar_data) * auto_threshold_ratio
for y_i in range(len(data_head)):
if bar_data[y_i] >= bar_data_thres:
edge.append((x_i, y_i))
edge_weight[(x_i, y_i)] = bar_data[y_i]
# skip the \r print line
    if ((plot_figures is True) or (isinstance(plot_figures, list) and 'aggre-imgs' in plot_figures)) and verbose:
print("")
# Make the transition matrix with edge weight estimation
transition_matrix = np.zeros([data.shape[1], data.shape[1]])
for key, val in edge_weight.items():
x, y = key
transition_matrix[x, y] = val
transition_matrix = normalize_by_column(transition_matrix)
# transition_matrix = normalize_by_row(transition_matrix)
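    # Column normalization makes every column sum to 1, turning the edge-weight
    # matrix into a stochastic transition matrix suitable for the random-walk
    # based root cause ranking imported above.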
def save_graph_excel(filename_prefix, matrix):
common_suffix = "-bef{}-aft{}-step{}-lag{}-thres{}".format(
before_length,
after_length,
step,
lag, auto_threshold_ratio)
if (plot_figures is True) or (isinstance(plot_figures, list) and 'graph' in plot_figures):
draw_weighted_graph(
matrix,
os.path.join(
dir_output,
                    filename_prefix + common_suffix + ".png",  # assumption: filename tail reconstructed; source truncated here
                ),
            )
'like', 'coin', 'favorite',
'reply', 'share', 'danmaku']
data = [json_req[index] for index in need]
if len(rank_list):
data = [time_str(), *data, *rank_list[:2], *rank_list[-2:]]
else:
data = [time_str(), *data]
self.data_v2[av_id] = data
def have_error(self, json_req: dict, types=None) -> bool:
''' check json_req'''
if json_req is None:
return False
if 'code' not in json_req or json_req['code'] != 0:
return False
if 'message' not in json_req or json_req['message'] != '0':
return False
if 'ttl' not in json_req or json_req['ttl'] != 1:
return False
        if types is not None:
if 'data' not in json_req or 'now' not in json_req['data']:
return False
return True
def check_type(self, av_id: int):
''' check type '''
if av_id in self.rank_type:
return self.rank_type[av_id]
if av_id in self.rank_map and not len(self.rank_map[av_id]):
self.rank_type[av_id] = True
return True
return 2
def check_type_req(self, av_id: int):
changeHeaders({'Referer': self.BASIC_AV_URL % av_id})
url = self.VIEW_URL % av_id
json_req = proxy_req(url, 1)
if json_req is None or 'data' not in json_req or 'tid' not in json_req['data']:
if can_retry(url):
self.check_type_req(av_id)
return
self.rank_type[av_id] = json_req['data']['tid'] == self.assign_tid
def add_av(self, av_id: int, rank: int, score: int) -> bool:
''' decide add av '''
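        # Track a new av if it ranks within the top 95 or scores above 5000; for
        # an av already tracked, re-record only when its rank improves by more
        # than 5 places or its score grows by more than 200.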
if av_id not in self.rank_map:
return rank < 95 or score > 5000
else:
if not len(self.rank_map[av_id]):
return True
else:
if self.rank_map[av_id][0] - rank > 5:
return True
return score - self.rank_map[av_id][1] > 200
def public_monitor(self, av_id: int, times: int):
''' a monitor '''
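        # Snapshot the follower count and rank now, sleep until exactly one day
        # after the publication time, snapshot again, and append both records to
        # public.csv for day-over-day comparison.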
self.public_list.append(av_id)
data_time, mid = self.public[av_id]
self.get_star_num(mid, 0)
self.check_rank_v2(av_id, 0)
time.sleep(5)
follower = self.star[mid] if mid in self.star else 0
origin_data = self.data_v2[av_id] if av_id in self.data_v2 else []
sleep_time = data_time + one_day - int(time.time())
if sleep_time < 0:
return
print('Monitor Begin %d' % (av_id))
time.sleep(sleep_time)
self.get_star_num(mid, 0)
self.check_rank_v2(av_id, 0)
time.sleep(5)
follower_2 = self.star[mid] if mid in self.star else 0
one_day_data = self.data_v2[av_id] if av_id in self.data_v2 else []
data = [time_str(data_time), av_id, follower,
follower_2, *origin_data, *one_day_data]
with codecs.open(data_dir + 'public.csv', 'a', encoding='utf-8') as f:
f.write(','.join([str(ii) for ii in data]) + '\n')
def public_data(self, av_id: int, times: int):
''' get public basic data '''
changeHeaders({'Referer': self.BASIC_AV_URL % av_id})
url = self.VIEW_URL % av_id
json_req = proxy_req(url, 1)
        if json_req is None or 'data' not in json_req or 'pubdate' not in json_req['data']:
if times < 3:
self.public_data(av_id, times + 1)
return
data_time = json_req['data']['pubdate']
mid = json_req['data']['owner']['mid']
self.get_star_num(mid, 0)
self.public[av_id] = [data_time, mid]
def get_star_num(self, mid: int, times: int, load_disk=False):
''' get star num'''
url = self.RELATION_STAT_URL % mid
header = {**headers, **
{'Origin': self.BILIBILI_URL, 'Referer': self.AV_URL}}
if 'Host' in header:
del header['Host']
req = proxy_req(url, 2, header=header)
        if req is None or req.status_code != 200 or len(req.text) < 8 or '{' not in req.text:
if times < 3:
self.get_star_num(mid, times + 1, load_disk)
return
        try:
            # strip the 7-char callback prefix and trailing ")" before parsing
            # (assumed JSONP-style response wrapper)
            json_req = json.loads(req.text[7:-1])
            self.star[mid] = json_req['data']['follower']
            if load_disk and self.check_star(mid, self.star[mid]):
                self.last_star[mid] = self.star[mid]
                with open('{}star.csv'.format(data_dir), 'a') as f:
                    f.write('%s,%d\n' % (time_str(), self.star[mid]))
        except Exception:
            pass
def check_rank_rose(self, av_id: int, rank_list: list):
''' check rank rose '''
if not self.check_rank_list(av_id, rank_list):
return
rank, score = rank_list[:2]
av_id_id = int(av_id) * 10 + int(rank_list[-1])
if av_id_id not in self.rank:
self.rank[av_id_id] = [rank_list[0] // 10]
else:
self.rank[av_id_id].append(rank_list[0] // 10)
self.last_rank[av_id_id] = rank_list[0]
send_email('%d day List || Rank: %d Score: %d' % (int(
rank_list[-1]), rank, score), '%d day List || Rank: %d Score: %d' % (int(rank_list[-1]), rank, score))
def check_star(self, mid: int, star: int) -> bool:
''' check star '''
        if mid not in self.last_star:
return True
last_star = self.last_star[mid]
if last_star > star:
return False
if last_star + self.view_abnormal < star:
return False
return True
def load_rank_index(self, index: int, day_index: int):
''' load rank '''
changeHeaders({'Referer': self.AV_URL})
url = self.RANKING_URL % (index, day_index)
text = basic_req(url, 3)
rank_str = re.findall('window.__INITIAL_STATE__=(.*?);', text)
if not len(rank_str):
if can_retry(url):
self.load_rank_index(index, day_index)
return False
rank_map = json.loads(rank_str[0])
rank_list = rank_map['rankList']
now_av_id = []
wait_check_public = []
rank_map = {}
for ii, rank in enumerate(rank_list):
av_id = int(rank['aid'])
            need_params = ['pts', 'author', 'mid', 'play', 'video_review', 'coins', 'duration', 'title']
            temp_rank_list = [ii, *[rank[key] for key in need_params], index, day_index]
now_av_id.append(av_id)
if not self.check_type(av_id):
continue
self.check_rank_rose(av_id, temp_rank_list)
if self.add_av(av_id, ii, temp_rank_list[1]):
rank_map[av_id] = temp_rank_list
''' check assign av rank '''
for ii in self.assign_ids:
            if ii not in self.public:
                wait_check_public.append(ii)
            if ii not in self.last_view and ii not in self.rank_map:
                self.rank_map[ii] = []
have_assign = len([0 for ii in self.assign_ids if ii in now_av_id]) > 0
''' check tid type '''
threading_public = []
for ii in rank_map.keys():
work = threading.Thread(target=self.check_type_req, args=(ii,))
threading_public.append(work)
for work in threading_public:
work.start()
for work in threading_public:
work.join()
for ii, jj in rank_map.items():
            if self.check_type(ii) is not True:
continue
            if ii not in self.public:
wait_check_public.append(ii)
self.last_check[ii] = int(time.time())
self.rank_map[ii] = jj
''' load public basic data '''
threading_public = []
for ii in wait_check_public:
work = threading.Thread(target=self.public_data, args=(ii, 0,))
threading_public.append(work)
for work in threading_public:
work.start()
for work in threading_public:
work.join()
''' begin monitor '''
threading_list = []
for ii, jj in self.public.items():
            if ii not in self.public_list and jj[0] + one_day > int(time.time()):
work = threading.Thread(
target=self.public_monitor, args=(ii, 0,))
threading_list.append(work)
for work in threading_list:
work.start()
return have_assign
def load_rank(self):
''' load rank '''
assign_1 = self.load_rank_index(1, 1)
assign_2 = self.load_rank_index(1, 3)
have_assign = assign_1 or assign_2
print(assign_1, assign_2, have_assign)
if self.have_assign and not have_assign:
send_email('No rank.....No Rank......No Rank.....',
'No rank.....No Rank......No Rank.....')
self.have_assign = have_assign
print('Rank_map_len:', len(self.rank_map.keys()), 'Empty:',
len([1 for ii in self.rank_map.values() if not len(ii)]))
youshan = [','.join([str(kk) for kk in [ii, *jj]])
for ii, jj in self.rank_map.items()]
with codecs.open(data_dir + 'youshang', 'w', encoding='utf-8') as f:
f.write('\n'.join(youshan))
def load_click(self, num=1000000):
''' schedule click '''
self.rank_map = {ii: [] for ii in self.assign_ids}
for index in range(num):
threading_list = []
if not index % 5:
threading_list.append(threading.Thread(target=self.load_rank, args=()))
threading_list.append(threading.Thread(target=self.load_history_data, args=()))
if not index % 15:
threading_list.append(threading.Thread(target=self.get_star_num, args=(self.assign_up_mid, 0, True)))
threading_list.append(threading.Thread(target=self.update_proxy, args=()))
threading_list.append(threading.Thread(target=self.load_configure, args=()))
threading_list.append(threading.Thread(target=self.get_check, args=()))
for av_id in self.rank_map:
if av_id in self.av_id_list or av_id in self.assign_ids:
threading_list.append(threading.Thread(target=self.check_rank, args=(av_id,)))
elif index % 3 == 2:
threading_list.append(threading.Thread(target=self.check_rank, args=(av_id,)))
for work in threading_list:
work.start()
time.sleep(120)
def update_proxy(self):
global proxy_req
proxy_req = GetFreeProxy().proxy_req
def update_ini(self, av_id: int):
cfg = ConfigParser()
cfg.read(assign_path, 'utf-8')
cfg.set('basic', 'basic_av_id', str(av_id))
history_av_ids = cfg.get('assign', 'av_ids')
cfg.set('assign', 'av_ids', '{},{}'.format(history_av_ids, av_id))
cfg.write(open(assign_path, 'w'))
def get_check(self):
''' check comment '''
self.load_av_lists()
        av_id_list = [[ii['aid'], ii['comment']] for ii in self.av_id_map.values()
                      if not re.findall(self.ignore_list, str(ii['aid']))]
av_map = {ii['aid']: ii for ii in self.av_id_map.values()}
self.comment_next = {ii: True for (ii, _) in av_id_list}
        if self.av_id_list and len(self.av_id_list) != len(av_id_list):
            new_av_id = [ii for (ii, _) in av_id_list if ii not in self.av_id_list and ii not in self.del_map]
self.rank_map = {**self.rank_map, **{ii:[] for ii in new_av_id}}
echo(1, new_av_id)
for ii in new_av_id:
shell_str = 'nohup ipython3 bilibili/bsocket.py {} %d >> log.txt 2>&1 &'.format(ii)
echo(0, shell_str)
os.system(shell_str % 1)
os.system(shell_str % 2)
email_str = '{} av:{} was releasing at {}!!! Please check the auto pipeline.'.format(av_map[ii]['title'], ii, time_str(av_map[ii]['created']))
email_str2 = '{} {} is release at {}.\nPlease check the online & common program.\n\nBest wish for you\n--------\nSend from script by gunjianpan.'.format(av_map[ii]['title'], time_str(av_map[ii]['created']), self.BASIC_AV_URL % ii)
send_email(email_str2, email_str)
self.update_ini(ii)
self.public[ii] = [av_map[ii]['created'], av_map[ii]['mid']]
self.av_id_list = [ii for (ii,_) in av_id_list]
now_hour = int(time_str(time_format='%H'))
now_min = int(time_str(time_format='%M'))
now_time = now_hour + now_min / 60
if now_time > self.ignore_start and now_time < self.ignore_end:
return
if os.path.exists('{}comment.pkl'.format(comment_dir)):
with codecs.open('{}comment.pkl'.format(comment_dir), 'rb') as f:
self.comment = pickle.load(f)
if self.assign_up_mid == -1:
return
threading_list = []
for (ii, jj) in av_id_list:
if ii not in self.comment:
self.comment[ii] = {}
work = threading.Thread(
target=self.comment_check_schedule, args=(ii, jj,))
threading_list.append(work)
for work in threading_list:
work.start()
for work in threading_list:
work.join()
with codecs.open('{}comment.pkl'.format(comment_dir), 'wb') as f:
pickle.dump(self.comment, f)
return av_id_list
def comment_check_schedule(self, av_id: int, comment: int):
''' schedule comment check thread '''
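        # Comments are paged 20 per request, so pn runs from 1 through
        # (comment - 1) // 20 + 1, i.e. ceil(comment / 20) pages.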
for pn in range(1, (comment - 1) // 20 + 2):
if not self.comment_next[av_id]:
return
echo(2, 'Comment check, av_id:', av_id, 'pn:', pn)
self.check_comment_once(av_id, pn)
comment = [self.comment[av_id][k] for k in sorted(self.comment[av_id].keys())]
basic = [','.join([str(jj) for jj in ii['basic']])
                 for ii in comment if 'basic' in ii]
# neutron/agent/l3/router_info.py
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import netaddr
from neutron_lib import constants as lib_constants
from neutron_lib.utils import helpers
from oslo_log import log as logging
import six
from neutron._i18n import _, _LE, _LW
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.ipam import utils as ipam_utils
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
FLOATINGIP_STATUS_NOCHANGE = object()
ADDRESS_SCOPE_MARK_MASK = "0xffff0000"
ADDRESS_SCOPE_MARK_ID_MIN = 1024
ADDRESS_SCOPE_MARK_ID_MAX = 2048
DEFAULT_ADDRESS_SCOPE = "noscope"
class RouterInfo(object):
def __init__(self,
agent,
router_id,
router,
agent_conf,
interface_driver,
use_ipv6=False):
self.agent = agent
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self.fip_map = {}
self.internal_ports = []
self.pd_subnets = {}
self.floating_ips = set()
# Invoke the setter for establishing initial SNAT action
self.router = router
self.use_ipv6 = use_ipv6
ns = self.create_router_namespace_object(
router_id, agent_conf, interface_driver, use_ipv6)
self.router_namespace = ns
self.ns_name = ns.name
self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN,
ADDRESS_SCOPE_MARK_ID_MAX))
self._address_scope_to_mark_id = {
DEFAULT_ADDRESS_SCOPE: self.available_mark_ids.pop()}
self.iptables_manager = iptables_manager.IptablesManager(
use_ipv6=use_ipv6,
namespace=self.ns_name)
self.initialize_address_scope_iptables()
self.routes = []
self.agent_conf = agent_conf
self.driver = interface_driver
# radvd is a neutron.agent.linux.ra.DaemonMonitor
self.radvd = None
def initialize(self, process_monitor):
"""Initialize the router on the system.
This differs from __init__ in that this method actually affects the
system creating namespaces, starting processes, etc. The other merely
initializes the python object. This separates in-memory object
initialization from methods that actually go do stuff to the system.
:param process_monitor: The agent's process monitor instance.
"""
self.process_monitor = process_monitor
self.radvd = ra.DaemonMonitor(self.router_id,
self.ns_name,
process_monitor,
self.get_internal_device_name,
self.agent_conf)
self.router_namespace.create()
def create_router_namespace_object(
self, router_id, agent_conf, iface_driver, use_ipv6):
return namespaces.RouterNamespace(
router_id, agent_conf, iface_driver, use_ipv6)
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
def is_router_master(self):
return True
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def get_gw_ns_name(self):
return self.ns_name
def _update_routing_table(self, operation, route, namespace):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def update_routing_table(self, operation, route):
self._update_routing_table(operation, route, self.ns_name)
def routes_updated(self, old_routes, new_routes):
adds, removes = helpers.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
            # 'replace' succeeds even if there is no existing route
self.update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self.update_routing_table('delete', route)
def get_ex_gw_port(self):
return self.router.get('gw_port')
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
return self.router.get(lib_constants.FLOATINGIP_KEY, [])
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s/32 -j SNAT --to-source %s' %
(fixed_ip, floating_ip))]
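    # The three rules above implement 1:1 NAT for a floating IP: traffic to the
    # floating address is DNATed to the fixed address (PREROUTING for routed
    # traffic, OUTPUT for router-originated traffic), and traffic from the
    # fixed address is SNATed back to the floating address on egress.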
def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark):
mark_traffic_to_floating_ip = (
'floatingip', '-d %s/32 -j MARK --set-xmark %s' % (
floating_ip, internal_mark))
mark_traffic_from_fixed_ip = (
'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip)
return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip]
def get_address_scope_mark_mask(self, address_scope=None):
if not address_scope:
address_scope = DEFAULT_ADDRESS_SCOPE
if address_scope not in self._address_scope_to_mark_id:
self._address_scope_to_mark_id[address_scope] = (
self.available_mark_ids.pop())
mark_id = self._address_scope_to_mark_id[address_scope]
        # NOTE: Address scopes use only the upper 16 bits of the 32-bit fwmark
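        # e.g. mark_id 1024 (0x400) yields "0x4000000/0xffff0000": the scope id
        # sits in the upper 16 bits while the lower 16 bits stay untouched.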
return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK)
def get_port_address_scope_mark(self, port):
"""Get the IP version 4 and 6 address scope mark for the port
:param port: A port dict from the RPC call
:returns: A dict mapping the address family to the address scope mark
"""
port_scopes = port.get('address_scopes', {})
address_scope_mark_masks = (
(int(k), self.get_address_scope_mark_mask(v))
for k, v in port_scopes.items())
return collections.defaultdict(self.get_address_scope_mark_mask,
address_scope_mark_masks)
def process_floating_ip_nat_rules(self):
"""Configure NAT rules for the router's floating IPs.
Configures iptables rules for the floating ips of the given router
"""
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
self.iptables_manager.apply()
def _process_pd_iptables_rules(self, prefix, subnet_id):
"""Configure iptables rules for prefix delegated subnets"""
ext_scope = self._get_external_address_scope()
ext_scope_mark = self.get_address_scope_mark_mask(ext_scope)
ex_gw_device = self.get_external_device_name(
self.get_ex_gw_port()['id'])
scope_rule = self.address_scope_mangle_rule(ex_gw_device,
ext_scope_mark)
self.iptables_manager.ipv6['mangle'].add_rule(
'scope',
'-d %s ' % prefix + scope_rule,
tag=('prefix_delegation_%s' % subnet_id))
def process_floating_ip_address_scope_rules(self):
"""Configure address scope related iptables rules for the router's
floating IPs.
"""
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['mangle'].clear_rules_by_tag('floating_ip')
all_floating_ips = self.get_floating_ips()
ext_scope = self._get_external_address_scope()
# Filter out the floating ips that have fixed ip in the same address
# scope. Because the packets for them will always be in one address
# scope, no need to manipulate MARK/CONNMARK for them.
floating_ips = [fip for fip in all_floating_ips
if fip.get('fixed_ip_address_scope') != ext_scope]
if floating_ips:
ext_scope_mark = self.get_address_scope_mark_mask(ext_scope)
ports_scopemark = self._get_address_scope_mark()
devices_in_ext_scope = {
device for device, mark
in ports_scopemark[lib_constants.IP_VERSION_4].items()
if mark == ext_scope_mark}
# Add address scope for floatingip egress
for device in devices_in_ext_scope:
self.iptables_manager.ipv4['mangle'].add_rule(
'float-snat',
'-o %s -j MARK --set-xmark %s'
% (device, ext_scope_mark),
tag='floating_ip')
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fip_ip = fip['floating_ip_address']
# Send the floating ip traffic to the right address scope
fixed_ip = fip['fixed_ip_address']
fixed_scope = fip.get('fixed_ip_address_scope')
internal_mark = self.get_address_scope_mark_mask(fixed_scope)
mangle_rules = self.floating_mangle_rules(
fip_ip, fixed_ip, internal_mark)
for chain, rule in mangle_rules:
self.iptables_manager.ipv4['mangle'].add_rule(
chain, rule, tag='floating_ip')
def process_snat_dnat_for_fip(self):
try:
self.process_floating_ip_nat_rules()
except Exception:
# TODO(salv-orlando): Less broad catching
msg = _('L3 agent failure to setup NAT for floating IPs')
LOG.exception(msg)
raise n_exc.FloatingIpSetupException(msg)
def _add_fip_addr_to_device(self, fip, device):
"""Configures the floating ip address on the device.
"""
try:
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
device.addr.add(ip_cidr)
return True
except RuntimeError:
# any exception occurred here should cause the floating IP
# to be set in error state
LOG.warning(_LW("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
def add_floating_ip(self, fip, interface_name, device):
raise NotImplementedError()
def gateway_redirect_cleanup(self, rtr_interface):
pass
def remove_floating_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)
def move_floating_ip(self, fip):
return lib_constants.FLOATINGIP_STATUS_ACTIVE
def remove_external_gateway_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
def process_floating_ip_addresses(self, interface_name):
"""Configure IP addresses on router's external gateway interface.
Ensures addresses for existing floating IPs and cleans up
        those that should no longer be configured.
"""
fip_statuses = {}
if interface_name is None:
LOG.debug('No Interface for floating IPs router: %s',
self.router['id'])
return fip_statuses
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
existing_cidrs = self.get_router_cidrs(device)
new_cidrs = set()
gw_cidrs = self._get_gw_ips_cidr()
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
new_cidrs.add(ip_cidr)
fip_statuses[fip['id']] = lib_constants.FLOATINGIP_STATUS_ACTIVE
if ip_cidr not in existing_cidrs:
fip_statuses[fip['id']] = self.add_floating_ip(
fip, interface_name, device)
LOG.debug('Floating ip %(id)s added, status %(status)s',
{'id': fip['id'],
'status': fip_statuses.get(fip['id'])})
elif (fip_ip in self.fip_map and
self.fip_map[fip_ip] != fip['fixed_ip_address']):
LOG.debug("Floating IP was moved from fixed IP "
"%(old)s to %(new)s",
{'old': self.fip_map[fip_ip],
'new': fip['fixed_ip_address']})
fip_statuses[fip['id']] = self.move_floating_ip(fip)
elif fip_statuses[fip['id']] == fip['status']:
# mark the status as not changed. we can't remove it because
# that's how the caller determines that it was removed
fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE
fips_to_remove = (
ip_cidr for ip_cidr in existing_cidrs - new_cidrs - gw_cidrs
if common_utils.is_cidr_host(ip_cidr))
for ip_cidr in fips_to_remove:
LOG.debug("Removing floating ip %s from interface %s in "
"namespace %s", ip_cidr, interface_name, self.ns_name)
self.remove_floating_ip(device, ip_cidr)
return fip_statuses
def _get_gw_ips_cidr(self):
gw_cidrs = set()
ex_gw_port = self.get_ex_gw_port()
if ex_gw_port:
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
addr = netaddr.IPAddress(ex_gw_ip)
if addr.version == lib_constants.IP_VERSION_4:
gw_cidrs.add(common_utils.ip_to_cidr(ex_gw_ip))
return gw_cidrs
    def configure_fip_addresses(self, interface_name):
        # NOTE: body reconstructed (assumption) to mirror process_snat_dnat_for_fip
        # above; the original source was truncated at this point.
        try:
            return self.process_floating_ip_addresses(interface_name)
        except Exception:
            # TODO(salv-orlando): Less broad catching
            msg = _('L3 agent failure to setup floating IPs')
            LOG.exception(msg)
            raise n_exc.FloatingIpSetupException(msg)
"""Utility module for setting up different envs"""
import numpy as np
import structlog
from shapely.geometry import Point
from ray.rllib.agents.ppo import DEFAULT_CONFIG
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from deepcomp.util.constants import SUPPORTED_ENVS, SUPPORTED_AGENTS, SUPPORTED_SHARING, SUPPORTED_UE_ARRIVAL, \
SUPPORTED_UTILITIES
from deepcomp.env.single_ue.variants import RelNormEnv
from deepcomp.env.multi_ue.central import CentralRelNormEnv
from deepcomp.env.multi_ue.multi_agent import MultiAgentMobileEnv
from deepcomp.env.entities.user import User
from deepcomp.env.entities.station import Basestation
from deepcomp.env.entities.map import Map
from deepcomp.env.util.movement import RandomWaypoint
from deepcomp.util.callbacks import CustomMetricCallbacks
log = structlog.get_logger()
def get_env_class(env_type):
"""Return the env class corresponding to the string type (from CLI)"""
assert env_type in SUPPORTED_AGENTS, f"Environment type was {env_type} but has to be one of {SUPPORTED_AGENTS}."
if env_type == 'single':
# return DatarateMobileEnv
# return NormDrMobileEnv
return RelNormEnv
if env_type == 'central':
# return CentralDrEnv
# return CentralNormDrEnv
return CentralRelNormEnv
# return CentralMaxNormEnv
if env_type == 'multi':
return MultiAgentMobileEnv
def get_sharing_for_bs(sharing, bs_idx):
"""Return the sharing model for the given BS"""
# if it's not mixed, it's the same for all BS
if sharing != 'mixed':
assert sharing in SUPPORTED_SHARING
return sharing
# else loop through the available sharing models
sharing_list = ['resource-fair', 'rate-fair', 'proportional-fair']
return sharing_list[bs_idx % len(sharing_list)]
def create_small_map(sharing_model):
"""
Create small map and 2 BS
:returns: tuple (map, bs_list)
"""
map = Map(width=150, height=100)
bs1 = Basestation('A', Point(50, 50), get_sharing_for_bs(sharing_model, 0))
bs2 = Basestation('B', Point(100, 50), get_sharing_for_bs(sharing_model, 1))
bs_list = [bs1, bs2]
return map, bs_list
def create_dyn_small_map(sharing_model, bs_dist=100, dist_to_border=10):
"""Small env with 2 BS and dynamic distance in between"""
map = Map(width=2 * dist_to_border + bs_dist, height=2 * dist_to_border)
bs1 = Basestation('A', Point(dist_to_border, dist_to_border), sharing_model)
bs2 = Basestation('B', Point(dist_to_border + bs_dist, dist_to_border), sharing_model)
return map, [bs1, bs2]
def create_medium_map(sharing_model):
"""
Deprecated: Use dynamic medium env instead. Kept this to reproduce earlier results.
Same as large env, but with map restricted to areas with coverage.
Thus, optimal episode reward should be close to num_ues * eps_length * 10 (ie, all UEs are always connected)
"""
map = Map(width=205, height=85)
bs1 = Basestation('A', Point(45, 35), sharing_model)
bs2 = Basestation('B', Point(160, 35), sharing_model)
bs3 = Basestation('C', Point(100, 85), sharing_model)
bs_list = [bs1, bs2, bs3]
return map, bs_list
def create_dyn_medium_map(sharing_model, bs_dist=100, dist_to_border=10):
"""
Create map with 3 BS at equal distance. Distance can be varied dynamically. Map is sized automatically.
Keep the same layout as old medium env here: A, B on same horizontal axis. C above in the middle
"""
# calculate vertical distance from A, B to C using Pythagoras
y_dist = np.sqrt(bs_dist ** 2 - (bs_dist / 2) ** 2)
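    # e.g. bs_dist=100 gives y_dist = sqrt(100**2 - 50**2) ≈ 86.6, so the three
    # BS form an equilateral triangle with side length 100.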
# derive map size from BS distance and distance to border
map_width = 2 * dist_to_border + bs_dist
map_height = 2 * dist_to_border + y_dist
map = Map(width=map_width, height=map_height)
# BS A is located at bottom left corner with specified distance to border
bs1 = Basestation('A', Point(dist_to_border, dist_to_border), get_sharing_for_bs(sharing_model, 0))
# other BS positions are derived accordingly
bs2 = Basestation('B', Point(dist_to_border + bs_dist, dist_to_border), get_sharing_for_bs(sharing_model, 1))
bs3 = Basestation('C', Point(dist_to_border + (bs_dist / 2), dist_to_border + y_dist), get_sharing_for_bs(sharing_model, 2))
return map, [bs1, bs2, bs3]
def create_large_map(sharing_model):
"""
Create larger map with 7 BS that are arranged in a typical hexagonal structure.
:returns: Tuple(map, bs_list)
"""
map = Map(width=230, height=260)
bs_list = [
# center
Basestation('A', Point(115, 130), get_sharing_for_bs(sharing_model, 0)),
# top left, counter-clockwise
Basestation('B', Point(30, 80), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(115, 30), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(200, 80), get_sharing_for_bs(sharing_model, 3)),
Basestation('E', Point(200, 180), get_sharing_for_bs(sharing_model, 4)),
Basestation('F', Point(115, 230), get_sharing_for_bs(sharing_model, 5)),
Basestation('G', Point(30, 180), get_sharing_for_bs(sharing_model, 6)),
]
return map, bs_list
def create_dyn_large_map(sharing_model, num_bs, dist_to_border=10):
assert 1 <= num_bs <= 7, "Only support 1-7 BS in large env"
_, bs_list = create_large_map(sharing_model)
# take only selected BS
bs_list = bs_list[:num_bs]
# create map with size according to BS positions
max_x, max_y = None, None
for bs in bs_list:
if max_x is None or bs.pos.x > max_x:
max_x = bs.pos.x
if max_y is None or bs.pos.y > max_y:
max_y = bs.pos.y
map = Map(width=max_x + dist_to_border, height=max_y + dist_to_border)
return map, bs_list
def create_ues(map, num_static_ues, num_slow_ues, num_fast_ues, util_func):
"""Create custom number of slow/fast UEs on the given map. Return UE list"""
ue_list = []
id = 1
for i in range(num_static_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity=0),
util_func=util_func))
id += 1
for i in range(num_slow_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity='slow'),
util_func=util_func))
id += 1
for i in range(num_fast_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity='fast'),
util_func=util_func))
id += 1
return ue_list
def create_custom_env(sharing_model):
"""Hand-created custom env. For demos or specific experiments."""
# map with 4 BS at distance of 100; distance 10 to border of map
map = Map(width=194, height=120)
bs_list = [
# left
Basestation('A', Point(10, 60), get_sharing_for_bs(sharing_model, 0)),
# counter-clockwise
Basestation('B', Point(97, 10), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(184, 60), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(97, 110), get_sharing_for_bs(sharing_model, 3)),
]
return map, bs_list
def get_env(map_size, bs_dist, num_static_ues, num_slow_ues, num_fast_ues, sharing_model, util_func, num_bs=None):
"""Create and return the environment corresponding to the given map_size"""
assert map_size in SUPPORTED_ENVS, f"Environment {map_size} is not one of {SUPPORTED_ENVS}."
assert util_func in SUPPORTED_UTILITIES, \
f"Utility function {util_func} not supported. Supported: {SUPPORTED_UTILITIES}"
# create map and BS list
map, bs_list = None, None
if map_size == 'small':
map, bs_list = create_small_map(sharing_model)
elif map_size == 'medium':
map, bs_list = create_dyn_medium_map(sharing_model, bs_dist=bs_dist)
elif map_size == 'large':
if num_bs is None:
map, bs_list = create_large_map(sharing_model)
else:
map, bs_list = create_dyn_large_map(sharing_model, num_bs)
elif map_size == 'custom':
map, bs_list = create_custom_env(sharing_model)
# create UEs
ue_list = create_ues(map, num_static_ues, num_slow_ues, num_fast_ues, util_func)
return map, ue_list, bs_list
def get_ue_arrival(ue_arrival_name):
"""Get the dict defining UE arrival over time based on the name provided via CLI"""
assert ue_arrival_name in SUPPORTED_UE_ARRIVAL
if ue_arrival_name is None:
return None
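    # Each arrival dict maps a simulation time step to the change in UE count at
    # that step, e.g. {10: 1, 30: -1} adds one UE at t=10 and removes it at t=30.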
if ue_arrival_name == "oneupdown":
return {10: 1, 30: -1}
if ue_arrival_name == "updownupdown":
return {10: 1, 20: -1, 30: 1, 40: -1}
if ue_arrival_name == "3up2down":
return {10: 3, 30: -2}
if ue_arrival_name == "updown":
return {10: 1, 15: 1, 20: 1, 40: 1, 50: -1, 60: -1}
if ue_arrival_name == "largeupdown":
return {
20: 1, 30: -1, 40: 1,
# large increase up to 12 (starting at 1)
45: 1, 50: 1, 55: 2, 60: 3, 65: 2, 70: 1,
# large decrease down to 1
75: -1, 80: -2, 85: -3, 90: -3, 95: -2
}
raise ValueError(f"Unknown UE arrival name: {ue_arrival_name}")
def create_env_config(cli_args):
"""
Create environment and RLlib config based on passed CLI args. Return config.
:param cli_args: Parsed CLI args
:return: The complete config for an RLlib agent, including the env & env_config
"""
env_class = get_env_class(cli_args.agent)
map, ue_list, bs_list = get_env(cli_args.env, cli_args.bs_dist, cli_args.static_ues, cli_args.slow_ues,
cli_args.fast_ues, cli_args.sharing, cli_args.util, num_bs=cli_args.num_bs)
# this is for DrEnv and step utility
# env_config = {
# 'episode_length': eps_length, 'seed': seed,
# 'map': map, 'bs_list': bs_list, 'ue_list': ue_list, 'dr_cutoff': 'auto', 'sub_req_dr': True,
# 'curr_dr_obs': False, 'ues_at_bs_obs': False, 'dist_obs': False, 'next_dist_obs': False
# }
# this is for the custom NormEnv and log utility
env_config = {
'episode_length': cli_args.eps_length, 'seed': cli_args.seed, 'map': map, 'bs_list': bs_list, 'ue_list': ue_list,
'rand_episodes': cli_args.rand_train, 'new_ue_interval': cli_args.new_ue_interval, 'reward': cli_args.reward,
'max_ues': cli_args.max_ues, 'ue_arrival': get_ue_arrival(cli_args.ue_arrival),
# if enabled log_metrics: log metrics even during training --> visible on tensorboard
# if disabled: log just during testing --> probably slightly faster training with less memory
'log_metrics': True,
# custom animation rendering
'dashboard': cli_args.dashboard, 'ue_details': cli_args.ue_details,
}
# convert ue_arrival sequence to str keys as required by RLlib: https://github.com/ray-project/ray/issues/16215
if env_config['ue_arrival'] is not None:
env_config['ue_arrival'] = {str(k): v for k, v in env_config['ue_arrival'].items()}
# create and return the config
config = DEFAULT_CONFIG.copy()
# discount factor (default 0.99)
# config['gamma'] = 0.5
# 0 = no workers/actors at all --> low overhead for short debugging; 2+ workers to accelerate long training
config['num_workers'] = cli_args.workers
config['seed'] = cli_args.seed
# write training stats to file under ~/ray_results (default: False)
config['monitor'] = True
config['train_batch_size'] = cli_args.batch_size # default: 4000; default in stable_baselines: 128
    # auto-normalize observations by subtracting mean and dividing by std (default: "NoFilter")
# config['observation_filter'] = "MeanStdFilter"
# NN settings: https://docs.ray.io/en/latest/rllib-models.html#built-in-model-parameters
# configure the size of the neural network's hidden layers; default: [256, 256]
# config['model']['fcnet_hiddens'] = [512, 512, 512]
# LSTM settings
config['model']['use_lstm'] = cli_args.lstm
    return config
import os
import re
import zlib
from typing import List, Dict, Union, Optional, Generator, Iterable
from collections import defaultdict
import logging
import requests
from .data import Language
from .tools import write_file_or_remove
from .storage import (
BaseVersion,
Storage,
Patch,
PatchElement,
PatchVersion,
get_system_yaml_version,
get_exe_version,
)
logger = logging.getLogger(__name__)
class RadsVersion(BaseVersion):
"""Wrapper class for version strings used by RADS
Solutions and projects all have individual version numbers (e.g. "0.0.1.30").
The version numbers are actually 32-bit unsigned integers represented using dot-notation, exactly the same as the
notation used for IPv4 addresses. Notably, each individual number caps at 255, so the version after 0.0.0.255 is
0.0.1.0.
"""
def __init__(self, v: Union[str, tuple]):
super().__init__(v)
        assert len(self.t) == 4, f"invalid RADS version format: {v}"
class RadsStorage(Storage):
"""
Storage based on RADS structure
Configuration options:
url -- storage URL (see examples below)
cdn -- 'default', 'kr' or 'pbe' (incompatible with 'url')
"""
storage_type = 'rads'
# all available values are in system.yaml
# values in use are in RADS/system/system.cfg
# region is ignored here (it is not actually needed)
DOWNLOAD_URL = "l3cdn.riotgames.com"
DOWNLOAD_PATH = "/releases/live"
DOWNLOAD_PATH_KR = "/KR_CBT"
DOWNLOAD_PATH_PBE = "/releases/pbe"
URL_DEFAULT = f"http://{DOWNLOAD_URL}{DOWNLOAD_PATH}/"
URL_KR = f"http://{DOWNLOAD_URL}{DOWNLOAD_PATH_KR}/"
URL_PBE = f"http://{DOWNLOAD_URL}{DOWNLOAD_PATH_PBE}/"
def __init__(self, path, url=None):
if url is None:
url = self.URL_DEFAULT
super().__init__(path, url)
@classmethod
def from_conf_data(cls, conf):
if 'cdn' in conf:
if 'url' in conf:
raise ValueError("'url' and 'cdn' are mutually exclusive")
url = getattr(cls, f"URL_{conf['cdn']}".upper())
else:
url = conf.get('url')
return cls(conf['path'], url)
def list_projects(self) -> List['RadsProject']:
"""List projects present in storage"""
ret = []
base = self.fspath("projects")
for name in os.listdir(base):
if os.path.isdir(f"{base}/{name}/releases"):
ret.append(RadsProject(self, name))
return ret
def list_solutions(self) -> List['RadsSolution']:
"""List solutions present in storage"""
ret = []
base = self.fspath("solutions")
for name in os.listdir(base):
if os.path.isdir(f"{base}/{name}/releases"):
ret.append(RadsSolution(self, name))
return ret
def patch_elements(self, stored=False):
solution_names = ('league_client_sln', 'lol_game_client_sln')
# peek next element for each solution
class Peeker:
def __init__(self, it):
self.it = it
self.cur = None
def peek(self):
if self.cur is None:
try:
self.cur = next(self.it)
except StopIteration:
pass
return self.cur
def consume(self):
assert self.cur is not None
self.cur = None
# drop solution versions without a patch
# convert them to patch elements
def gen_solution_elements(name):
solution = RadsSolution(self, name)
for sv in solution.versions(stored=stored):
patch = sv.patch_version()
if patch is None:
continue
yield RadsPatchElement(sv)
# for each solution, peek the next elements to yield the highest version
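        # This amounts to a k-way merge: each generator yields its solution's
        # elements in version order, and every pass emits the peeked element
        # with the highest version, keeping the combined stream globally sorted.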
peekers = [Peeker(gen_solution_elements(name)) for name in solution_names]
while True:
best_peeker, best_elem = None, None
for peeker in peekers:
elem = peeker.peek()
if elem is None:
continue
if best_elem is None or elem.version > best_elem.version:
best_peeker, best_elem = peeker, elem
if best_peeker is None:
break # exhausted
yield best_elem
best_peeker.consume()
class RadsSolution:
"""A Solution has multiple versions and contains many Projects.
The Riot Application Distribution System (RADS) has two Solutions: `league_client_sln` and `lol_game_client_sln`.
    The `league_client_sln` solution contains data for the client (LCU), and the `lol_game_client_sln` contains data for the game client.
These classes will likely work with other solutions, although some functionality may need to be extended.
There are multiple versions of a given solution, which can be accessed via the `.versions()` method.
All versions of a solution can be downloaded and extracted via the `.download()` method.
Each version of a solution contains multiple projects pertaining to different locales.
"""
def __init__(self, storage: RadsStorage, name):
self.storage = storage
self.path = f"solutions/{name}/releases"
self.name = name
def __str__(self):
return f"rads:{self.name}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.name}>"
def __eq__(self, other):
if isinstance(other, RadsSolution):
return self.name == other.name
return False
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
if isinstance(other, RadsSolution):
return self.name < other.name
return NotImplemented
def versions(self, stored=False) -> List['RadsSolutionVersion']:
"""Retrieve a sorted list of versions of this solution
If stored is True, only versions in storage are used (to avoid downloading new files).
"""
if stored:
fspath = self.storage.fspath(self.path)
if not os.path.isdir(fspath):
return [] # solution not in storage
listing = []
for path in os.listdir(fspath):
if not os.path.isdir(os.path.join(fspath, path)):
continue
listing.append(path)
else:
logger.debug(f"retrieve versions of {self}")
listing = self.storage.request_text(f"{self.path}/releaselisting").splitlines()
return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)
def download(self, langs):
for v in self.versions():
v.download(langs)
class RadsSolutionVersion:
"""A single version of a RadsSolution.
Each RadsSolutionVersion contains data for multiple projects, accessible via the `RadsSolutionVersion.projects` method.
There is one "main" project, and one project for each language.
The data contained in a RadsSolutionVersion can be downloaded and extracted via the `.download()` method.
"""
def __init__(self, solution: RadsSolution, version: 'RadsVersion'):
self.path = f"{solution.path}/{version}"
self.solution = solution
self.version = version
def __str__(self):
return f"{self.solution}={self.version}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.solution.name}={self.version}>"
def __eq__(self, other):
if isinstance(other, RadsSolutionVersion):
return self.solution == other.solution and self.version == other.version
return False
def __hash__(self):
return hash((self.solution, self.version))
def __lt__(self, other):
if isinstance(other, RadsSolutionVersion):
if self.solution < other.solution:
return True
elif self.solution == other.solution:
return self.version > other.version
else:
return False
return NotImplemented
def dependencies(self) -> Dict[Union[Language, None], List['RadsProjectVersion']]:
"""Parse dependencies from the solutionmanifest
Return a map of project versions for each language.
The entry None is set to all required project versions.
"""
logger.debug(f"retrieve dependencies of {self}")
path = f"{self.path}/solutionmanifest"
self.solution.storage.download(path, path)
with open(self.solution.storage.fspath(path)) as f:
lines = f.read().splitlines()
assert lines[0] == "RADS Solution Manifest", "unexpected solutionmanifest magic line"
assert lines[1] == "1.0.0.0", "unexpected solutionmanifest version"
assert lines[2] == self.solution.name, "solution name mismatch in solutionmanifest header"
assert lines[3] == self.version, "solution version mismatch in solutionmanifest header"
idx = 4
required_projects = [] # [name, ...]
projects = {} # {name: RadsProjectVersion}
nprojects, idx = int(lines[idx]), idx + 1
for _ in range(nprojects):
(name, version, unk1, unk2), idx = lines[idx:idx+4], idx + 4
unk1, unk2 = int(unk1), int(unk2)
if unk1 == 0:
required_projects.append(name)
else:
assert unk1 == 10
assert unk2 == 0
projects[name] = RadsProjectVersion(RadsProject(self.solution.storage, name), RadsVersion(version))
langs = {} # {Language: [RadsProjectVersion, ...]}
nlangs, idx = int(lines[idx]), idx + 1
for _ in range(nlangs):
(lang, unk1, ndeps), idx = lines[idx:idx+3], idx + 3
unk1, ndeps = int(unk1), int(ndeps)
assert unk1 == 0
deps, idx = lines[idx:idx+ndeps], idx + ndeps
langs[Language(lang)] = [projects[name] for name in deps]
langs[None] = list(projects[name] for name in required_projects)
return langs
def projects(self, langs=True) -> List['RadsProjectVersion']:
"""Return a list of projects for provided language(s)"""
dependencies = self.dependencies()
if langs is False:
return dependencies[None]
elif langs is True:
return list({pv for pvs in dependencies.values() for pv in pvs})
elif isinstance(langs, Language):
return dependencies[langs]
else:
return list({pv for lang in langs for pv in dependencies[lang]})
def filepaths(self, langs) -> Generator[str, None, None]:
"""Generate the extract path of files in the solution version"""
for pv in self.projects(langs):
yield from pv.filepaths()
def download(self, langs=True):
"""Download solution version files"""
logger.info(f"downloading solution {self}")
for pv in self.projects(langs):
pv.download()
def patch_version(self) -> Optional[PatchVersion]:
"""Return patch version or None if there is None
This method reads/writes version from/to cache.
"""
# for PBE: version is always "main"
if self.solution.storage.url == RadsStorage.URL_PBE:
return PatchVersion("main")
cache = self.solution.storage.fspath(f"{self.path}/_patch_version")
if os.path.isfile(cache):
logger.debug(f"retrieving patch version for {self} from cache")
with open(cache) as f:
version = f.read().strip()
version = PatchVersion(version) if version else None
else:
version = self._retrieve_patch_version()
if version is None:
logger.warning(f"failed to retrieve patch version for {self}")
else:
with open(cache, 'w') as f:
f.write(f"{version}\n")
return version
def _retrieve_patch_version(self) -> Optional[PatchVersion]:
"""Retrieve patch version from game files (no cache handling)
Return None if there is no patch version (because files are not
available anymore on Riot's CDN).
Raise an exception if patch version cannot be retrieved.
"""
logger.debug(f"retrieving patch version for {self}")
retrievers = {
# solution_name: (project_name, file_name, extractor)
'league_client_sln': (
'league_client',
'system.yaml',
get_system_yaml_version,
),
'lol_game_client_sln': (
'lol_game_client',
'League of Legends.exe',
get_exe_version,
),
}
try:
project_name, file_name, extractor = retrievers[self.solution.name]
except KeyError:
raise RuntimeError(f"no known way to retrieve patch version for solution {self.solution.name}")
for pv in self.projects(False):
if pv.project.name == project_name:
break
else:
raise ValueError(f"{project_name} project not found for {self}")
try:
filepaths = pv.filepaths()
except requests.exceptions.HTTPError as e:
# some packagemanifest files are not available anymore
# for these project versions, there is no patch version
if e.response is not None and e.response.status_code == 404:
                return None
            raise
'''
File name: sorting.py
Date created: Aug 22, 2019
Objective:
Sort RNA-seq expression PNG images using TCGA metadata
command: python3 sorting.py \
-f="/mnt/IKEGAMI/R/expression_profile/files.json" \
-C="/mnt/IKEGAMI/R/expression_profile/clinical.tsv" \
-s="/mnt/IKEGAMI/R/expression_profile/png_anal" \
-F="/mnt/IKEGAMI/R/expression_profile/png_rand/*.FPKM.txt.npy"
'''
import argparse
import numpy as np
from glob import glob
import os
import sys
import cv2
import json
import random
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--files')
parser.add_argument('-C', '--clinical')
parser.add_argument('-s', '--sorted_TCGA')
parser.add_argument('-p', '--sorted_patho')
parser.add_argument('-F', '--datapath')
opts = parser.parse_args()
# get data file
files = glob(opts.datapath)
df_clin = pd.read_table(opts.clinical, header=0, index_col=0)
f = open(opts.files, 'r')
jdata = json.load(f)
cnt = 0
TCGA = 1
if not os.path.exists(opts.sorted_patho):
os.makedirs(opts.sorted_patho)
if not os.path.exists(opts.sorted_TCGA):
TCGA = 0
for fileName in files:
imgRootName = os.path.basename(fileName).replace(".FPKM.txt.npy", "")[-36:]
ID = -1
for TotImg in range(len(jdata)):
if jdata[TotImg]['file_name'].startswith(imgRootName):
case = jdata[TotImg]['cases'][0]['case_id']
ID = TotImg
break
if ID == -1:
print("File name not found in json file.")
continue
cnt += 1
print("***** ", fileName, cnt)
project = df_clin.loc[case].loc["project_id"]
# gender = df_clin.loc[case].loc["gender"]
# vital = df_clin.loc[case].loc["vital_status"]
origin = df_clin.loc[case].loc["tissue_or_organ_of_origin"]
# prior = df_clin.loc[case].loc["prior_malignancy"]
patho = df_clin.loc[case].loc["primary_diagnosis"]
# stage = df_clin.loc[case].loc["tumor_stage"]
# ann_stage = df_clin.loc[case].loc["ann_arbor_clinical_stage"]
# vital = vital.replace(" ", "_")
origin = origin.replace(" ", "_")
origin = origin.replace(",_NOS", "")
# prior = prior.replace(" ", "_")
# stage = str(stage)
# stage = stage.replace(" ", "")
# stage = stage.replace("stage", "")
# stage = stage.replace('a','')
# stage = stage.replace('b','')
# ann_stage = ann_stage.replace(" ", "")
# ann_stage = ann_stage.replace("stage", "")
# ann_stage = ann_stage.replace('a','')
# ann_stage = ann_stage.replace('b','')
# pathology classification based on OncoTree
# exclude: Acute Leukemias of Ambiguous Lineage, because of its ambiguous disease entity
# exclude: Undifferentiated pleomorphic sarcoma, called a diagnosis of exclusion
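# note: the replacements below are substring-based and ordered so that more specific
# labels (e.g. "..._Adenocarcinoma_with_mixed_subtypes") are mapped before their
# shorter prefixes (e.g. "..._Adenocarcinoma")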
patho = project + '_' + patho
patho = patho.replace(" ", "_")
patho = patho.replace(",_NOS", "")
patho = patho.replace("BEATAML1.0-COHORT_Acute_monoblastic_and_monocytic_leukemia", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia,_CBF-beta", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia_with_inv(3)(q21q26.2)_or_t(3;3)(q21;q26.2);_RPN1-EVI1", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia_with_mutated_CEBPA", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Mixed_phenotype_acute_leukemia,_T", "other")
patho = patho.replace("BEATAML1.0-COHORT_Mixed_phenotype_acute_leukemia,_B", "other")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia_with_mutated_NPM1", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia_with_myelodysplasia-related_changes", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia_with_t(8;21)(q22;q22);_RUNX1-RUNX1T1", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia_with_t(9;11)(p22;q23);_MLLT3-MLL", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Myeloid_sarcoma", "other")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myelomonocytic_leukemia", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_promyelocytic_leukaemia,_PML-RAR-alpha", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_erythroid_leukaemia", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Myeloid_leukemia_associated_with_Down_Syndrome", "Acute Myeloid Leukemia")
patho = patho.replace("BEATAML1.0-COHORT_Acute_myeloid_leukemia", "Acute Myeloid Leukemia")
patho = patho.replace("CGCI-BLGSP_--", "other")
patho = patho.replace("CGCI-BLGSP_Burkitt-like_lymphoma", "other")
patho = patho.replace("CGCI-BLGSP_Burkitt_lymphoma", "Burkitt_lymphoma")
patho = patho.replace("CPTAC-3_Adenocarcinoma", "Lung Adenocarcinoma")
patho = patho.replace("CPTAC-3_Endometrioid_adenocarcinoma", "Uterine Endometrial Carcinoma")
patho = patho.replace("CPTAC-3_Renal_cell_carcinoma", "Kidney_Renal Clear Cell Carcinoma")
patho = patho.replace("CTSP-DLBCL1_Diffuse_large_B-cell_lymphoma", "Diffuse Large B-Cell Lymphoma")
patho = patho.replace("HCMI-CMDC_Adenocarcinoma", origin + " Adenocarcinoma")
patho = patho.replace("HCMI-CMDC_Glioblastoma", "Brain_Glioblastoma")
patho = patho.replace("MMRF-COMMPASS_--", "other")
patho = patho.replace("MMRF-COMMPASS_Multiple_myeloma", "Multiple_Myeloma")
patho = patho.replace("NCICCR-DLBCL_Diffuse_large_B-cell_lymphoma", "Diffuse Large B-Cell Lymphoma")
patho = patho.replace("TARGET-ALL-P1_Mixed_phenotype_acute_leukemia,_T/myeloid", "other")
patho = patho.replace("TARGET-ALL-P1_T_lymphoblastic_leukemia/lymphoma", "T-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P1_Precursor_B-cell_lymphoblastic_leukemia", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P1_Mixed_phenotype_acute_leukemia,_B/myeloid", "other")
patho = patho.replace("TARGET-ALL-P1_Mixed_phenotype_acute_leukemia_with_t(v;11q23);_MLL_rearranged", "other")
patho = patho.replace("TARGET-ALL-P1_Undifferentiated_leukaemia", "other")
patho = patho.replace("TARGET-ALL-P1_Mixed_phenotype_acute_leukemia_with_t(9;22)(q34;q11.2);_BCR-ABL1", "other")
patho = patho.replace("TARGET-ALL-P1_Leukemia", "other")
patho = patho.replace("TARGET-ALL-P1_B_lymphoblastic_leukemia/lymphoma", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P1_Juvenile_myelomonocytic_leukemia", "other")
patho = patho.replace("TARGET-ALL-P1_--", "other")
patho = patho.replace("TARGET-ALL-P2_Mixed_phenotype_acute_leukemia,_T/myeloid", "other")
patho = patho.replace("TARGET-ALL-P2_T_lymphoblastic_leukemia/lymphoma", "T-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P2_Precursor_B-cell_lymphoblastic_leukemia", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P2_Mixed_phenotype_acute_leukemia,_B/myeloid", "other")
patho = patho.replace("TARGET-ALL-P2_Mixed_phenotype_acute_leukemia_with_t(v;11q23);_MLL_rearranged", "other")
patho = patho.replace("TARGET-ALL-P2_Undifferentiated_leukaemia", "other")
patho = patho.replace("TARGET-ALL-P2_Mixed_phenotype_acute_leukemia_with_t(9;22)(q34;q11.2);_BCR-ABL1", "other")
patho = patho.replace("TARGET-ALL-P2_Leukemia", "other")
patho = patho.replace("TARGET-ALL-P2_B_lymphoblastic_leukemia/lymphoma", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P2_Juvenile_myelomonocytic_leukemia", "other")
patho = patho.replace("TARGET-ALL-P3_Mixed_phenotype_acute_leukemia,_T/myeloid", "other")
patho = patho.replace("TARGET-ALL-P3_T_lymphoblastic_leukemia/lymphoma", "T-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P3_Precursor_B-cell_lymphoblastic_leukemia", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P3_Mixed_phenotype_acute_leukemia,_B/myeloid", "other")
patho = patho.replace("TARGET-ALL-P3_Mixed_phenotype_acute_leukemia_with_t(v;11q23);_MLL_rearranged", "other")
patho = patho.replace("TARGET-ALL-P3_Undifferentiated_leukaemia", "other")
patho = patho.replace("TARGET-ALL-P3_Mixed_phenotype_acute_leukemia_with_t(9;22)(q34;q11.2);_BCR-ABL1", "other")
patho = patho.replace("TARGET-ALL-P3_Leukemia", "other")
patho = patho.replace("TARGET-ALL-P3_B_lymphoblastic_leukemia/lymphoma", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P3_Juvenile_myelomonocytic_leukemia", "other")
patho = patho.replace("TARGET-ALL-P3_Not_Reported", "other")
patho = patho.replace("TARGET-ALL-P3_Mixed_phenotype_acute_leukemia_with_t(9;22)(q34;q11.2);_BCR-ABL1", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P3_Mixed_phenotype_acute_leukemia_with_t(v;11q23);_MLL_rearranged", "B-Lymphoblastic Leukemia-Lymphoma")
patho = patho.replace("TARGET-ALL-P3_Acute_myeloid_leukemia", "Acute_Myeloid_Leukemia")
patho = patho.replace("TARGET-AML_Acute_myeloid_leukemia", "Acute Myeloid Leukemia")
patho = patho.replace("TARGET-CCSK_Clear_cell_sarcoma_of_kidney", "Kidney_Clear Cell Sarcoma of Kidney")
patho = patho.replace("TARGET-NBL_Neuroblastoma", "Neuroblastoma-Ganglioneuroblastoma")
patho = patho.replace("TARGET-NBL_Ganglioneuroblastoma", "Neuroblastoma-Ganglioneuroblastoma")
patho = patho.replace("TARGET-OS_Osteosarcoma", "Bone_Osteosarcoma")
patho = patho.replace("TARGET-RT_Malignant_rhabdoid_tumor", "Rhabdoid Cancer")
patho = patho.replace("TARGET-WT_Wilms_tumor", "Wilms Tumor")
patho = patho.replace("TCGA-ACC_Adrenal_cortical_carcinoma", "Adrenocortical Carcinoma")
patho = patho.replace("TCGA-BLCA_Carcinoma", "other")
patho = patho.replace("TCGA-BLCA_Papillary_adenocarcinoma", "other")
patho = patho.replace("TCGA-BLCA_Papillary_transitional_cell_carcinoma", "Bladder Urothelial Carcinoma")
patho = patho.replace("TCGA-BLCA_Squamous_cell_carcinoma", "other")
patho = patho.replace("TCGA-BLCA_Transitional_cell_carcinoma", "Bladder Urothelial Carcinoma")
patho = patho.replace("TCGA-BRCA_--", "other")
patho = patho.replace("TCGA-BRCA_Adenoid_cystic_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Apocrine_adenocarcinoma", "other")
patho = patho.replace("TCGA-BRCA_Basal_cell_carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Cribriform_carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Infiltrating_duct_and_lobular_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Infiltrating_duct_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Infiltrating_duct_mixed_with_other_types_of_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Infiltrating_lobular_mixed_with_other_types_of_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Intraductal_micropapillary_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Intraductal_papillary_adenocarcinoma_with_invasion", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Large_cell_neuroendocrine_carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Lobular_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Medullary_carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Metaplastic_carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Mucinous_adenocarcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Paget_disease_and_infiltrating_duct_carcinoma_of_breast", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Papillary_carcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-BRCA_Phyllodes_tumor", "other")
patho = patho.replace("TCGA-BRCA_Pleomorphic_carcinoma", "other")
patho = patho.replace("TCGA-BRCA_Secretory_carcinoma_of_breast", "other")
patho = patho.replace("TCGA-BRCA_Tubular_adenocarcinoma", "Breast_Invasive Breast Carcinoma")
patho = patho.replace("TCGA-CESC_Adenocarcinoma", "other")
patho = patho.replace("TCGA-CESC_Adenosquamous_carcinoma", "other")
patho = patho.replace("TCGA-CESC_Basaloid_squamous_cell_carcinoma", "Cervical Squamous Cell Carcinoma")
patho = patho.replace("TCGA-CESC_Endometrioid_adenocarcinoma", "Cervical Adenocarcinoma")
patho = patho.replace("TCGA-CESC_Mucinous_adenocarcinoma", "Cervical Adenocarcinoma")
patho = patho.replace("TCGA-CESC_Papillary_squamous_cell_carcinoma", "Cervical Squamous Cell Carcinoma")
patho = patho.replace("TCGA-CESC_Squamous_cell_carcinoma", "Cervical Squamous Cell Carcinoma")
patho = patho.replace("TCGA-CHOL_Cholangiocarcinoma", "Cholangiocarcinoma")
patho = patho.replace("TCGA-COAD_--", "other")
patho = patho.replace("TCGA-COAD_Adenocarcinoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-COAD_Adenosquamous_carcinoma", "other")
patho = patho.replace("TCGA-COAD_Carcinoma", "other")
patho = patho.replace("TCGA-COAD_Mucinous_adenocarcinoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-COAD_Papillary_adenocarcinoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-DLBC_Diffuse_large_B-cell_lymphoma", "Diffuse Large B-Cell Lymphoma")
patho = patho.replace("TCGA-DLBC_Malignant_lymphoma,_large_B-cell,_diffuse", "Diffuse Large B-Cell Lymphoma")
patho = patho.replace("TCGA-ESCA_Adenocarcinoma", "Esophagogastric Adenocarcinoma")
patho = patho.replace("TCGA-ESCA_Basaloid_squamous_cell_carcinoma", origin + " Squamous Cell Carcinoma")
patho = patho.replace("TCGA-ESCA_Mucinous_adenocarcinoma", "Esophagogastric Adenocarcinoma")
patho = patho.replace("TCGA-ESCA_Squamous_cell_carcinoma", origin + " Squamous Cell Carcinoma")
patho = patho.replace("TCGA-ESCA_Tubular_adenocarcinoma", "Esophagogastric Adenocarcinoma")
patho = patho.replace("TCGA-GBM_--", "other")
patho = patho.replace("TCGA-GBM_Glioblastoma", "Brain_Glioblastoma")
patho = patho.replace("TCGA-HNSC_Basaloid_squamous_cell_carcinoma", "Head and Neck Squamous Cell Carcinoma")
patho = patho.replace("TCGA-HNSC_Squamous_cell_carcinoma", "Head and Neck Squamous Cell Carcinoma")
patho = patho.replace("TCGA-KICH_Renal_cell_carcinoma", "Kidney_Chromophobe Renal Cell Carcinoma")
patho = patho.replace("TCGA-KIRC_Clear_cell_adenocarcinoma", "Kidney_Renal Clear Cell Carcinoma")
patho = patho.replace("TCGA-KIRC_Renal_cell_carcinoma", "other")
patho = patho.replace("TCGA-KIRP_Papillary_adenocarcinoma", "Kidney_Papillary Renal Cell Carcinoma")
patho = patho.replace("TCGA-LAML_Acute_myeloid_leukemia", "Acute Myeloid Leukemia")
patho = patho.replace("TCGA-LGG_--", "other")
patho = patho.replace("TCGA-LGG_Astrocytoma", "Brain_Oligodendroglioma-Astrocytoma")
patho = patho.replace("TCGA-LGG_Mixed_glioma", "Brain_Oligodendroglioma-Astrocytoma")
patho = patho.replace("TCGA-LGG_Oligodendroglioma", "Brain_Oligodendroglioma-Astrocytoma")
patho = patho.replace("TCGA-LIHC_Clear_cell_adenocarcinoma", "other")
patho = patho.replace("TCGA-LIHC_Combined_hepatocellular_carcinoma_and_cholangiocarcinoma", "other")
patho = patho.replace("TCGA-LIHC_Hepatocellular_carcinoma", "Hepatocellular Carcinoma")
patho = patho.replace("TCGA-LUAD_Acinar_cell_carcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Adenocarcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Bronchio-alveolar_carcinoma,_mucinous", "other")
patho = patho.replace("TCGA-LUAD_Bronchiolo-alveolar_adenocarcinoma", "other")
patho = patho.replace("TCGA-LUAD_Bronchiolo-alveolar_carcinoma,_non-mucinous", "other")
patho = patho.replace("TCGA-LUAD_Clear_cell_adenocarcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Micropapillary_carcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Mucinous_adenocarcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Papillary_adenocarcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Signet_ring_cell_carcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUAD_Solid_carcinoma", "Lung Adenocarcinoma")
patho = patho.replace("TCGA-LUSC_Basaloid_squamous_cell_carcinoma", "Lung Squamous Cell Carcinoma")
patho = patho.replace("TCGA-LUSC_Papillary_squamous_cell_carcinoma", "Lung Squamous Cell Carcinoma")
patho = patho.replace("TCGA-LUSC_Squamous_cell_carcinoma,_keratinizing", "Lung Squamous Cell Carcinoma")
patho = patho.replace("TCGA-LUSC_Squamous_cell_carcinoma,_large_cell,_nonkeratinizing", "Lung Squamous Cell Carcinoma")
patho = patho.replace("TCGA-LUSC_Squamous_cell_carcinoma,_small_cell,_nonkeratinizing", "Lung Squamous Cell Carcinoma")
patho = patho.replace("TCGA-LUSC_Squamous_cell_carcinoma", "Lung Squamous Cell Carcinoma")
patho = patho.replace("TCGA-MESO_Epithelioid_mesothelioma,_malignant", "Pleural Mesothelioma")
patho = patho.replace("TCGA-MESO_Fibrous_mesothelioma,_malignant", "Pleural Mesothelioma")
patho = patho.replace("TCGA-MESO_Mesothelioma,_biphasic,_malignant", "Pleural Mesothelioma")
patho = patho.replace("TCGA-MESO_Mesothelioma,_malignant", "Pleural Mesothelioma")
patho = patho.replace("TCGA-OV_Papillary_serous_cystadenocarcinoma", "Ovary_Serous Ovarian Cancer")
patho = patho.replace("TCGA-OV_Serous_cystadenocarcinoma", "Ovary_Serous Ovarian Cancer")
patho = patho.replace("TCGA-PAAD_Adenocarcinoma_with_mixed_subtypes", "Pancreatic Adenocarcinoma")
patho = patho.replace("TCGA-PAAD_Adenocarcinoma", "Pancreatic Adenocarcinoma")
patho = patho.replace("TCGA-PAAD_Carcinoma,_undifferentiated", "other")
patho = patho.replace("TCGA-PAAD_Infiltrating_duct_carcinoma", "Pancreatic Adenocarcinoma")
patho = patho.replace("TCGA-PAAD_Mucinous_adenocarcinoma", "Pancreatic Adenocarcinoma")
patho = patho.replace("TCGA-PAAD_Neuroendocrine_carcinoma", "other")
patho = patho.replace("TCGA-PCPG_Extra-adrenal_paraganglioma,_malignant", "Pheochromocytoma-Paraganglioma")
patho = patho.replace("TCGA-PCPG_Extra-adrenal_paraganglioma", "Pheochromocytoma-Paraganglioma")
patho = patho.replace("TCGA-PCPG_Paraganglioma,_malignant", "Pheochromocytoma-Paraganglioma")
patho = patho.replace("TCGA-PCPG_Paraganglioma", "Pheochromocytoma-Paraganglioma")
patho = patho.replace("TCGA-PCPG_Pheochromocytoma,_malignant", "Pheochromocytoma-Paraganglioma")
patho = patho.replace("TCGA-PCPG_Pheochromocytoma", "Pheochromocytoma-Paraganglioma")
patho = patho.replace("TCGA-PRAD_Adenocarcinoma_with_mixed_subtypes", "Prostate Adenocarcinoma")
patho = patho.replace("TCGA-PRAD_Adenocarcinoma", "Prostate Adenocarcinoma")
patho = patho.replace("TCGA-PRAD_Infiltrating_duct_carcinoma", "Prostate Adenocarcinoma")
patho = patho.replace("TCGA-PRAD_Mucinous_adenocarcinoma", "Prostate Adenocarcinoma")
patho = patho.replace("TCGA-READ_--", "other")
patho = patho.replace("TCGA-READ_Adenocarcinoma_in_tubolovillous_adenoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-READ_Adenocarcinoma_with_mixed_subtypes", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-READ_Adenocarcinoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-READ_Mucinous_adenocarcinoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-READ_Tubular_adenocarcinoma", "Colorectal Adenocarcinoma")
patho = patho.replace("TCGA-SARC_Abdominal_fibromatosis", "other")
patho = patho.replace("TCGA-SARC_Aggressive_fibromatosis", "other")
patho = patho.replace("TCGA-SARC_Dedifferentiated_liposarcoma", "STS_Dedifferentiated liposarcoma")
patho = patho.replace("TCGA-SARC_Fibromyxosarcoma", "STS_Myxofibrosarcoma")
patho = patho.replace("TCGA-SARC_Giant_cell_sarcoma", "other")
patho = patho.replace("TCGA-SARC_Leiomyosarcoma", "STS_Leiomyosarcoma")
patho = patho.replace("TCGA-SARC_Liposarcoma,_well_differentiated", "other")
patho = patho.replace("TCGA-SARC_Malignant_fibrous_histiocytoma", "other")
patho = patho.replace("TCGA-SARC_Malignant_peripheral_nerve_sheath_tumor", "other")
patho = patho.replace("TCGA-SARC_Myxoid_leiomyosarcoma", "STS_Leiomyosarcoma")
patho = patho.replace("TCGA-SARC_Pleomorphic_liposarcoma", "other")
patho = patho.replace("TCGA-SARC_Synovial_sarcoma,_biphasic", "STS_Synovial Sarcoma")
patho = patho.replace("TCGA-SARC_Synovial_sarcoma,_spindle_cell", "STS_Synovial Sarcoma")
patho = patho.replace("TCGA-SARC_Synovial_sarcoma", "STS_Synovial Sarcoma")
patho = patho.replace("TCGA-SARC_Undifferentiated_sarcoma", "other")
patho = patho.replace("TCGA-SKCM_Acral_lentiginous_melanoma,_malignant", "Cutaneous Melanoma")
patho = patho.replace("TCGA-SKCM_Desmoplastic_melanoma,_malignant", "Cutaneous | |
meaningless for 3d diffraction
det = condor.Detector(solid_angle_correction=False, **self.param_detector)
# Atoms
atomic_numbers = map(lambda el: el.number, atoms)
atomic_numbers = [atomic_number + 5 for atomic_number in atomic_numbers]
# atomic_numbers = [82 for atomic_number in atomic_numbers]
# convert Angstrom to m (CONDOR uses meters)
atomic_positions = list(map(lambda pos: [pos.x * 1E-10, pos.y * 1E-10, pos.z * 1E-10], atoms))
par = condor.ParticleAtoms(atomic_numbers=atomic_numbers, atomic_positions=atomic_positions)
s = "particle_atoms"
condor_exp = condor.Experiment(src, {s: par}, det)
res = condor_exp.propagate3d()
# retrieve some physical quantities that might be useful for users
intensity = res["entry_1"]["data_1"]["data"]
fourier_space = res["entry_1"]["data_1"]["data_fourier"]
phases = np.angle(fourier_space) % (2 * np.pi)
# 3D diffraction calculation
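# the inverse FFT of the (centered) Fourier coefficients recovers the real-space
# scattering density; fftshift is applied before and after so the volume stays centered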
real_space = np.fft.fftshift(np.fft.ifftn(np.fft.fftshift(res["entry_1"]["data_1"]["data_fourier"])))
window = get_window(self.window, self.n_px)
tot_density = window * real_space.real
center_of_mass = ndimage.measurements.center_of_mass(tot_density)
logger.debug("Tot density data dimensions: {}".format(tot_density.shape))
logger.debug("Center of mass of total density: {}".format(center_of_mass))
# take the fourier transform of structure in real_space
fft_coeff = fftpack.fftn(tot_density, shape=(self.nx_fft, self.ny_fft, self.nz_fft))
# now shift the quadrants around so that low spatial frequencies are in
# the center of the 2D fourier transformed image.
fft_coeff_shifted = fftpack.fftshift(fft_coeff)
# calculate a 3D power spectrum
power_spect = np.abs(fft_coeff_shifted) ** 2
if self.use_mask:
xc = (self.nx_fft - 1.0) / 2.0
yc = (self.ny_fft - 1.0) / 2.0
zc = (self.nz_fft - 1.0) / 2.0
# spherical mask
a, b, c = xc, yc, zc
x, y, z = np.ogrid[-a:self.nx_fft - a, -b:self.ny_fft - b, -c:self.nz_fft - c]
mask_int = x * x + y * y + z * z <= self.mask_r_min * self.mask_r_min
mask_out = x * x + y * y + z * z >= self.mask_r_max * self.mask_r_max
# zero the voxels inside mask_r_min and outside mask_r_max, keeping only the spherical shell in between
power_spect[mask_int] = 0.0
power_spect[mask_out] = 0.0
# cut the spectrum and keep only the relevant part for crystal-structure recognition of
# hexagonal closed packed (spacegroup=194)
# simple cubic (spacegroup=221)
# face centered cubic (spacegroup=225)
# diamond (spacegroup=227)
# body centered cubic (spacegroup=229)
# this interval (20:108) might need to be varied if other classes are added
power_spect_cut = power_spect[20:108, 20:108, 20:108]
# zoom by two times using spline interpolation
power_spect = ndimage.zoom(power_spect_cut, (2, 2, 2))
if save_diff_intensity:
np.save('/home/ziletti/Documents/calc_nomadml/rot_inv_3d/power_spect.npy', power_spect)
# power_spect.shape = 176, 176, 176
if plot_3d:
plot_3d_volume(power_spect)
vox = np.copy(power_spect)
logger.debug("nan in data: {}".format(np.count_nonzero(~np.isnan(vox))))
# optimized
# these specifications are valid for a power_spect = power_spect[20:108, 20:108, 20:108]
# and a magnification of 2
xyz_indices_r = get_slice_volume_indices(vox, min_r=32.0, dr=1.0, max_r=83., phi_bins=self.phi_bins,
theta_bins=self.theta_bins)
# slow - only for benchmarking the fast implementation below (shells_to_sph, interp_theta_phi_surfaces)
# (vox_by_slices, theta_phi_by_slices) = _slice_3d_volume_slow(vox)
# convert 3d shells
(vox_by_slices, theta_phi_by_slices) = get_shells_from_indices(xyz_indices_r, vox)
if plot_slices:
plot_concentric_shells(vox_by_slices, base_folder=self.configs['io']['main_folder'], idx_slices=None,
create_animation=False)
image_by_slices = interp_theta_phi_surfaces(theta_phi_by_slices, theta_bins=self.theta_bins_fine,
phi_bins=self.phi_bins_fine)
if plot_slices_sph_coords:
plot_concentric_shells_spherical_coords(image_by_slices, base_folder=self.configs['io']['main_folder'],
idx_slices=None)
coeffs_list = []
nl_list = []
ls_list = []
for idx_slice in range(image_by_slices.shape[0]):
logger.debug("img #{} max: {}".format(idx_slice, image_by_slices[idx_slice].max()))
# set to zero the spherical harmonics coefficients above self.sph_l_cutoff
coeffs = SHExpandDH(image_by_slices[idx_slice], sampling=2)
coeffs_filtered = coeffs.copy()
coeffs_filtered[:, self.sph_l_cutoff:, :] = 0.
coeffs = coeffs_filtered.copy()
nl = coeffs.shape[0]
ls = np.arange(nl)
coeffs_list.append(coeffs)
nl_list.append(nl)
ls_list.append(ls)
coeffs = np.asarray(coeffs_list).reshape(image_by_slices.shape[0], coeffs.shape[0], coeffs.shape[1],
coeffs.shape[2])
sh_coeffs_list = []
for idx_slice in range(coeffs.shape[0]):
sh_coeffs = SHCoeffs.from_array(coeffs[idx_slice])
sh_coeffs_list.append(sh_coeffs)
sh_spectrum_list = []
for sh_coeff in sh_coeffs_list:
sh_spectrum = sh_coeff.spectrum(convention='l2norm')
sh_spectrum_list.append(sh_spectrum)
sh_spectra = np.asarray(sh_spectrum_list).reshape(coeffs.shape[0], -1)
# cut the spherical harmonics expansion to sph_l_cutoff order
logger.debug('Spherical harmonics spectra maximum before normalization: {}'.format(sh_spectra.max()))
sh_spectra = sh_spectra[:, :self.sph_l_cutoff]
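# min-max normalization maps the spectra onto [0, 1] so they can be used as image-like input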
sh_spectra = (sh_spectra - sh_spectra.min()) / (sh_spectra.max() - sh_spectra.min())
# add results in ASE structure info
descriptor_data = dict(descriptor_name=self.name, descriptor_info=str(self),
diffraction_3d_sh_spectrum=sh_spectra)
else:
# return array with zeros for structures with less than min_nb_atoms
sh_spectra = np.zeros((52, int(self.sph_l_cutoff)))
descriptor_data = dict(descriptor_name=self.name, descriptor_info=str(self),
diffraction_3d_sh_spectrum=sh_spectra)
structure.info['descriptor'] = descriptor_data
return structure
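# Usage sketch (hypothetical call pattern; assumes a configured Diffraction3D instance):
#   descriptor = Diffraction3D(configs=configs)
#   structure = descriptor.calculate(structure)
#   sh_spectra = structure.info['descriptor']['diffraction_3d_sh_spectrum']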
def write(self, structure, tar=None, op_id=0, write_sh_spectra_npy=False, write_sh_spectra_png=True,
write_geo=True, format_geometry='aims'):
"""
Parameters:
structure: class, ASE atoms class
Instance of the class ASE atoms class
format_geometry: string, optional (default='aims')
File output format. All ASE valid output formats are accepted.
For a list: https://wiki.fysik.dtu.dk/ase/ase/io/io.html
"""
if not is_descriptor_consistent(structure, self):
raise Exception('Descriptor not consistent. Aborting.')
desc_folder = self.configs['io']['desc_folder']
descriptor_info = structure.info['descriptor']['descriptor_info']
sh_spectra = structure.info['descriptor']['diffraction_3d_sh_spectrum']
if write_sh_spectra_npy:
sh_spectra_filename_npy = os.path.abspath(os.path.normpath(os.path.join(desc_folder, structure.info[
'label'] + '_op' + str(op_id) + self.desc_metadata.ix['diffraction_3d_sh_spectrum']['file_ending'])))
np.save(sh_spectra_filename_npy, sh_spectra)
structure.info['diff_3d_sh_spectrum_filename_npy'] = sh_spectra_filename_npy
tar.add(structure.info['diff_3d_sh_spectrum_filename_npy'])
if write_sh_spectra_png:
sh_spectra_filename_png = os.path.abspath(os.path.normpath(os.path.join(desc_folder, structure.info[
'label'] + '_op' + str(op_id) + self.desc_metadata.ix['diffraction_3d_sh_spectrum_image'][
'file_ending'])))
plt.imsave(sh_spectra_filename_png, sh_spectra)
structure.info['diff_3d_sh_spectrum_filename_png'] = sh_spectra_filename_png
tar.add(structure.info['diff_3d_sh_spectrum_filename_png'])
if write_geo:
# to have the file accessible by the Beaker notebook image we need to put them
# in a special folder ('/user/tmp')
if self.configs['runtime']['isBeaker']:
# only for Beaker Notebook
coord_filename_in = os.path.abspath(os.path.normpath(os.path.join('/user/tmp/',
structure.info['label'] +
self.desc_metadata.ix[
'diffraction_3d_coordinates'][
'file_ending'])))
else:
coord_filename_in = os.path.abspath(os.path.normpath(os.path.join(desc_folder, structure.info['label'] +
self.desc_metadata.ix[
'diffraction_3d_coordinates'][
'file_ending'])))
structure.write(coord_filename_in, format=format_geometry)
structure.info['diff_3d_coord_filename_in'] = coord_filename_in
tar.add(structure.info['diff_3d_coord_filename_in'])
def get_design_matrix(structures, method='flatten_images', nn_model=None, layer_name=None):
"""Starting from atomic structures calculate the design matrix for the three-dimensional diffraction fingerprint.
The list of structures must contain the calculated :py:class:`ai4materials.descriptors.diffraction3d.Diffraction3D`.
Parameters:
structures: ``ase.Atoms`` object or list of ``ase.Atoms`` object
Atomic structure or list of atomic structures.
Returns:
np.ndarray, shape [n_samples, n_features]
Returns the design matrix.
.. codeauthor:: <NAME> <<EMAIL>>
"""
images = []
for idx_structure, structure in enumerate(structures):
diffraction_3d_sh_spectrum = structure.info['descriptor']['diffraction_3d_sh_spectrum']
images.append(diffraction_3d_sh_spectrum)
images = np.asarray(images)
images = np.reshape(images, (images.shape[0], -1, images.shape[1], images.shape[2]))
if method == 'flatten_images':
design_matrix = np.reshape(images, (images.shape[0], -1))
elif method == 'nn_representation':
if nn_model is not None:
logger.info("Using the convolutional neural network filters as feature matrix.")
logger.info("Layer name: {0}".format(layer_name))
logger.debug(nn_model.summary())
activations = np.asarray(get_activations(nn_model, images, print_shape_only=True, layer_name=layer_name))
design_matrix = np.reshape(activations, (activations.shape[1], -1))
else:
raise ValueError("Please pass a valid Keras neural network model.")
logger.info("Feature matrix shape: {0}".format(design_matrix.shape))
return design_matrix
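# Usage sketch (hypothetical model/layer names):
#   X = get_design_matrix(structures, method='flatten_images')
#   X_nn = get_design_matrix(structures, method='nn_representation', nn_model=model, layer_name='conv2d_1')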
def plot_3d_volume(power_spect):
"""Generate a 3d plot given a numpy array with Mayavi.
This function can be used to plot any three-dimensional field, passed as a np.array.
It uses the `mayavi.tools.pipeline.volume` from Mayavi:
http://docs.enthought.com/mayavi/mayavi/auto/mlab_pipeline_other_functions.html#volume
In the plot it is assumed that the elements of the array are equally spaced.
Parameters:
power_spect: np.ndarray, shape [n_px, n_py, n_pz]
Array containing a three-dimensional quantity (i.e. field).
.. codeauthor:: <NAME> <<EMAIL>>
"""
try:
from mayavi import mlab
except ImportError:
raise ImportError("Could not import Mayavi. Mayavi is required for 3d plotting.")
mlab.figure(1, bgcolor=(0.5, 0.5, 0.5), size=(800, 800))
mlab.options.offscreen = False
mlab.clf()
# remove nan and normalize the spectrum for plotting purposes only
power_spect_plot = np.nan_to_num(power_spect)
power_spect_plot_norm = (power_spect_plot - power_spect_plot.min()) / (
power_spect_plot.max() - power_spect_plot.min())
src = mlab.pipeline.scalar_field(power_spect_plot_norm)
field_min = power_spect_plot_norm.min()
field_max = power_spect_plot_norm.max()
mlab.pipeline.volume(src, vmin=0., vmax=field_min + .5 * (field_max - field_min))
mlab.colorbar(title='Field intensity', orientation='vertical')
# insert plane parallel to axis passing through the origin
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', slice_index=power_spect_plot_norm.shape[0] // 2)
mlab.pipeline.image_plane_widget(src, plane_orientation='y_axes', slice_index=power_spect_plot_norm.shape[1] // 2)
mlab.pipeline.image_plane_widget(src, plane_orientation='z_axes', slice_index=power_spect_plot_norm.shape[2] // 2)
mlab.colorbar(title='Field intensity', orientation='vertical')
mlab.show()
mlab.close(all=True)
def plot_concentric_shells(vox_by_slices, base_folder, idx_slices=None, create_animation=False):
"""Plot the concentric shells for a given three-dimensional volumetric shape.
The volumetric shape is the three-dimensional diffraction intensity, as calculated by
:py:mod:`ai4materials.descriptors.diffraction3d.Diffraction3D`. To plot the concentric shells
for different voxel np.ndarray shapes simply change ``x, y, z = np.mgrid[0:176:176j, 0:176:176j, 0:176:176j]``
to your desired meshgrid.
Parameters:
vox_by_slices: np.ndarray, shape [n_slices, n_px, n_py, n_pz]
4-dimensional array containing each concentric shell obtained from
:py:mod:`ai4materials.descriptors.diffraction3d.Diffraction3D`.
``n_px``, ``n_py``, ``n_pz`` are given by the interpolation and the region of the space
considered. In our case, ``n_slices=52``, ``n_px=n_py=n_pz=176``.
base_folder: str
Folder to save the generated figures. The figures are saved in a subfolder ``png_shells`` of
``base_folder``.
idx_slices: list of int, optional (default=None)
List of integers defining which concentric shells to plot.
If `None`, all concentric shells are plotted.
create_animation: bool, optional (default=False)
If `True` create an animation containing all concentric shells.
.. codeauthor:: <NAME> <<EMAIL>>
"""
try:
from mayavi import mlab
except ImportError:
raise ImportError("Could not import Mayavi. Mayavi is required for 3d plotting.")
if idx_slices is None:
idx_slices = range(1, vox_by_slices.shape[0], 1)
# create folder for saving files
shells_images_folder = os.path.join(base_folder, 'png_shells')
if not os.path.exists(shells_images_folder):
os.makedirs(shells_images_folder)
filename_png_list = []
x, y, z = np.mgrid[0:176:176j, 0:176:176j, 0:176:176j]
mlab.clf()
for idx_slice in idx_slices:
mlab.options.offscreen = False
filename_png = os.path.join(shells_images_folder, 'desc_slice_' + str(idx_slice) + '.png')
filename_png_list.append(filename_png)
scalars = vox_by_slices[idx_slice]
c_of_mass = ndimage.measurements.center_of_mass(scalars)
logger.info("Center of mass: | |
def MStreamUtils_swiginit(*args, **kwargs):
pass
def MFnSubd_edgeIsValid(*args, **kwargs):
pass
def MFnCamera_stereoHITEnabled(*args, **kwargs):
pass
def MFnReflectShader_reflectedColor(*args, **kwargs):
pass
def MGlobal_disableStow(*args, **kwargs):
pass
def MFnMesh_removeFaceVertexColors(*args, **kwargs):
pass
def MURI_getQueryValueDelimiter(*args, **kwargs):
pass
def MDistance_uiToInternal(*args, **kwargs):
pass
def MFnAttribute_setUsedAsColor(*args, **kwargs):
pass
def MNodeMessage_addNodePreRemovalCallback(*args, **kwargs):
pass
def MFnNurbsSurface_numBoundaries(*args, **kwargs):
pass
def MFnTransform_transformation(*args, **kwargs):
pass
def MItMeshEdge_edge(*args, **kwargs):
pass
def MFnDependencyNode_dgTimer(*args, **kwargs):
pass
def MPlugArray_swigregister(*args, **kwargs):
pass
def new_MTimerMessage(*args, **kwargs):
pass
def delete_MFcurveEdit(*args, **kwargs):
pass
def MIteratorType_swiginit(*args, **kwargs):
pass
def MFnMesh_getUVSetFamilyNames(*args, **kwargs):
pass
def MFnStringArrayData_swigregister(*args, **kwargs):
pass
def MFnContainerNode_getPublishedPlugs(*args, **kwargs):
pass
def new_MSelectionMask(*args, **kwargs):
pass
def MProfilingScope_className(*args, **kwargs):
pass
def MFloatVector_y_get(*args, **kwargs):
pass
def MGlobal_setDisplayCVs(*args, **kwargs):
pass
def MFnDagNode_className(*args, **kwargs):
pass
def MFnLightDataAttribute_create(*args, **kwargs):
pass
def MFnNurbsSurface_formInU(*args, **kwargs):
pass
def new_MIffTag(*args, **kwargs):
pass
def MItSurfaceCV_hasHistoryOnCreate(*args, **kwargs):
pass
def new_MFnMesh(*args, **kwargs):
pass
def MAngle_asUnits(*args, **kwargs):
pass
def MDataHandle_setGenericBool(*args, **kwargs):
pass
def MItMeshPolygon_getColorIndex(*args, **kwargs):
pass
def MItSubdEdge_next(*args, **kwargs):
pass
def MFileObject_ithFullName(*args, **kwargs):
pass
def MItDependencyGraph_prune(*args, **kwargs):
pass
def MFloatPointArray_setLength(*args, **kwargs):
pass
def MGlobal_setYAxisUp(*args, **kwargs):
pass
def MFnVolumeLight_className(*args, **kwargs):
pass
def MFnGenericAttribute_addAccept(*args, **kwargs):
pass
def MColorArray_remove(*args, **kwargs):
pass
def MUintArray_swigregister(*args, **kwargs):
pass
def MScriptUtil_setShort4ArrayItem(*args, **kwargs):
pass
def MVector_swiginit(*args, **kwargs):
pass
def MEventMessage_addEventCallback(*args, **kwargs):
pass
def MFnSingleIndexedComponent_swigregister(*args, **kwargs):
pass
def MDGModifier_newPlugValueFloat(*args, **kwargs):
pass
def MAttributePattern_findPattern(*args, **kwargs):
pass
def MInt64Array_length(*args, **kwargs):
pass
def MMatrixArray___getitem__(*args, **kwargs):
pass
def MFnCamera_setVerticalShake(*args, **kwargs):
pass
def MFnLambertShader_glowIntensity(*args, **kwargs):
pass
def MTime_setValue(*args, **kwargs):
pass
def delete_doublePtr(*args, **kwargs):
pass
def MURI_asString(*args, **kwargs):
pass
def MFnPluginData_data(*args, **kwargs):
pass
def MFnNonAmbientLight_swigregister(*args, **kwargs):
pass
def MDistance_setUnit(*args, **kwargs):
pass
def MDoubleArray___eq__(*args, **kwargs):
pass
def MFnAttribute_isUsedAsFilename(*args, **kwargs):
pass
def MObjectArray_length(*args, **kwargs):
pass
def MFnNurbsSurface_normal(*args, **kwargs):
pass
def MGlobal_displayInfo(*args, **kwargs):
pass
def MFnAssembly_importFile(*args, **kwargs):
pass
def MFnNumericData_setData2Float(*args, **kwargs):
pass
def MFnMesh_getBlindDataTypes(*args, **kwargs):
pass
def MPlugArray_append(*args, **kwargs):
pass
def new_MTimer(*args, **kwargs):
pass
def MFnMesh_renameUVSet(*args, **kwargs):
pass
def MFnMesh_isNormalLocked(*args, **kwargs):
pass
def MRenderPassRegistry_registerRenderPassDefinition(*args, **kwargs):
pass
def MDagPath_hasFn(*args, **kwargs):
pass
def MFnMesh_getCreaseEdges(*args, **kwargs):
pass
def MNodeClass_swiginit(*args, **kwargs):
pass
def MFnVolumeLight_shadowAngle(*args, **kwargs):
pass
def MFnTransform_setRestPosition(*args, **kwargs):
pass
def uCharPtr_swiginit(*args, **kwargs):
pass
def MSceneMessage_addReferenceCallback(*args, **kwargs):
pass
def MProfiler_getBufferSize(*args, **kwargs):
pass
def MFloatVector___isub__(*args, **kwargs):
pass
def MQuaternion_z_set(*args, **kwargs):
pass
def MFnNurbsSurfaceData_swigregister(*args, **kwargs):
pass
def MItSurfaceCV_position(*args, **kwargs):
pass
def MFnDoubleIndexedComponent_swiginit(*args, **kwargs):
pass
def MFnMatrixData_isTransformation(*args, **kwargs):
pass
def MGlobal_getSelectionListByName(*args, **kwargs):
pass
def MPlug_numElements(*args, **kwargs):
pass
def MDataHandle_set2Double(*args, **kwargs):
pass
def delete_MItMeshPolygon(*args, **kwargs):
pass
def MItSubdFace_swigregister(*args, **kwargs):
pass
def MFileObject_assign(*args, **kwargs):
pass
def MItDependencyGraph_toggleDirection(*args, **kwargs):
pass
def MItMeshVertex_connectedToFace(*args, **kwargs):
pass
def MFnVectorArrayData_set(*args, **kwargs):
pass
def MFnCamera_setShutterAngle(*args, **kwargs):
pass
def new_MFnFloatArrayData(*args, **kwargs):
pass
def MArrayDataBuilder_removeElement(*args, **kwargs):
pass
def MUintArray_clear(*args, **kwargs):
pass
def MInt64Array___delitem__(*args, **kwargs):
pass
def MScriptUtil_getFloatArrayItem(*args, **kwargs):
pass
def MEulerRotation_closestSolution(*args, **kwargs):
pass
def MFnSet_annotation(*args, **kwargs):
pass
def MDGModifier_deleteNode(*args, **kwargs):
pass
def new_MImage(*args, **kwargs):
pass
def MModelMessage_addNodeAddedToModelCallback(*args, **kwargs):
pass
def MFnNurbsCurve_closestPoint(*args, **kwargs):
pass
def MStreamUtils_readInt(*args, **kwargs):
pass
def MFnCamera_getFilmApertureLimits(*args, **kwargs):
pass
def MTimeArray_insert(*args, **kwargs):
pass
def MFnUInt64ArrayData_className(*args, **kwargs):
pass
def MMeshIntersector_swiginit(*args, **kwargs):
pass
def MFnPhongEShader_setWhiteness(*args, **kwargs):
pass
def MFnAnisotropyShader_roughness(*args, **kwargs):
pass
def MUuid_generate(*args, **kwargs):
pass
def MFnDependencyNode_setExternalContent(*args, **kwargs):
pass
def MFnAssembly_activate(*args, **kwargs):
pass
def charPtr_swigregister(*args, **kwargs):
pass
def new_MObjectHandle(*args, **kwargs):
pass
def MFnMesh_setPoint(*args, **kwargs):
pass
def MRenderPassDef_className(*args, **kwargs):
pass
def MDAGDrawOverrideInfo_fLOD_get(*args, **kwargs):
pass
def MFnMesh_unlockVertexNormals(*args, **kwargs):
pass
def new_MProfilingScope(*args, **kwargs):
pass
def new_array2dDouble(*args, **kwargs):
pass
def MFnDirectionalLight_setUseLightPosition(*args, **kwargs):
pass
def MFloatMatrix_homogenize(*args, **kwargs):
pass
def MSelectionList_remove(*args, **kwargs):
pass
def MFloatVectorArray_className(*args, **kwargs):
pass
def MFnLayeredShader_setHardwareShader(*args, **kwargs):
pass
def MItSubdFace_className(*args, **kwargs):
pass
def MFnDoubleArrayData_array(*args, **kwargs):
pass
def MFnMatrixArrayData_array(*args, **kwargs):
pass
def MVector_length(*args, **kwargs):
pass
def MItGeometry_setAllPositions(*args, **kwargs):
pass
def MDataHandle_setShort(*args, **kwargs):
pass
def MFnNurbsCurve_length(*args, **kwargs):
pass
def MFileIO_mustRenameToSaveMsg(*args, **kwargs):
pass
def MItMeshPolygon_getPointAtUV(*args, **kwargs):
pass
def MIntArray_append(*args, **kwargs):
pass
def MFileIO_unloadReference(*args, **kwargs):
pass
def MFnVolumeLight_setShadowAngle(*args, **kwargs):
pass
def MUint64Array_setLength(*args, **kwargs):
pass
def delete_MArrayDataHandle(*args, **kwargs):
pass
def MUint64Array___add__(*args, **kwargs):
pass
def MFnDagNode_parentCount(*args, **kwargs):
pass
def MDGMessage_swigregister(*args, **kwargs):
pass
def MInt64Array_sizeIncrement(*args, **kwargs):
pass
def MFnAttribute_isStorable(*args, **kwargs):
pass
def MEulerRotation___mul__(*args, **kwargs):
pass
def MMeshSmoothOptions_openSubdivVertexBoundary(*args, **kwargs):
pass
def MImageFileInfo_imageType(*args, **kwargs):
pass
def MImage_floatPixels(*args, **kwargs):
pass
def MFnNumericAttribute_hasMax(*args, **kwargs):
pass
def MFnCamera_set(*args, **kwargs):
pass
def MTesselationParams_setUDistanceFraction(*args, **kwargs):
pass
def MFnAttribute_disconnectBehavior(*args, **kwargs):
pass
def array4dDouble_swiginit(*args, **kwargs):
pass
def new_boolPtr(*args, **kwargs):
pass
def MCallbackIdArray_swigregister(*args, **kwargs):
pass
def MDagMessage_addInstanceRemovedCallback(*args, **kwargs):
pass
def MFnAmbientLight_setShadowRadius(*args, **kwargs):
pass
def MPlug_info(*args, **kwargs):
pass
def MFnDependencyNode_isFlagSet(*args, **kwargs):
pass
def MFnMesh_getBlindDataFaceVertexIndices(*args, **kwargs):
pass
def delete_MVectorArray(*args, **kwargs):
pass
def MPlugArray_clear(*args, **kwargs):
pass
def MFnMesh_intersect(*args, **kwargs):
pass
def MDagPathArray_insert(*args, **kwargs):
pass
def MScriptUtil_getUchar(*args, **kwargs):
pass
def array2dFloat_get(*args, **kwargs):
pass
def MFnDagNode_removeChild(*args, **kwargs):
pass
def delete_MFnLayeredShader(*args, **kwargs):
pass
def MFnCameraSet_isLayerActive(*args, **kwargs):
pass
def MVector___xor__(*args, **kwargs):
pass
def MFileIO_exportAsReference(*args, **kwargs):
pass
def MItMeshEdge_connectedToEdge(*args, **kwargs):
pass
def MColorArray_setSizeIncrement(*args, **kwargs):
pass
def MFnCamera_isDisplayFilmGate(*args, **kwargs):
pass
def MTrimBoundaryArray_swigregister(*args, **kwargs):
pass
def MFnSubd_vertexNormal(*args, **kwargs):
pass
def MArrayDataHandle_outputValue(*args, **kwargs):
pass
def MMessage_className(*args, **kwargs):
pass
def MDGContext_fsNormal_get(*args, **kwargs):
pass
def MItCurveCV_next(*args, **kwargs):
pass
def MFnArrayAttrsData_intArray(*args, **kwargs):
pass
def MDoubleArray_swigregister(*args, **kwargs):
pass
def MFnSubd_polygonHasVertexUVs(*args, **kwargs):
pass
def MFnAttribute_setWritable(*args, **kwargs):
pass
def MFnMesh_assignColor(*args, **kwargs):
pass
def MFnDagNode_getConnectedSetsAndMembers(*args, **kwargs):
pass
def MIntArray___add__(*args, **kwargs):
pass
def MFnNumericAttribute_getSoftMax(*args, **kwargs):
pass
def MPlane_distance(*args, **kwargs):
pass
def MComputation_swigregister(*args, **kwargs):
pass
def delete_MObject(*args, **kwargs):
pass
def new_MCallbackIdArray(*args, **kwargs):
pass
def MDagMessage_addParentAddedDagPathCallback(*args, **kwargs):
pass
def intPtr_swiginit(*args, **kwargs):
pass
def MPlug_connectionByPhysicalIndex(*args, **kwargs):
pass
def delete_MFloatVectorArray(*args, **kwargs):
pass
def MVector_angle(*args, **kwargs):
pass
def MFnDependencyNode_findPlug(*args, **kwargs):
pass
def MFnNurbsCurve_getKnots(*args, **kwargs):
pass
def MFnMesh_deleteColorSet(*args, **kwargs):
pass
def MMessageNode_fNextNode_set(*args, **kwargs):
pass
def MArgParser_getObjects(*args, **kwargs):
pass
def MFnMesh_setIsColorClamped(*args, **kwargs):
pass
def MDataHandle_asFloatMatrix(*args, **kwargs):
pass
def MFnDagNode_dagRoot(*args, **kwargs):
pass
def MAttributeSpecArray_clear(*args, **kwargs):
pass
def MScriptUtil_asUint(*args, **kwargs):
pass
def MFloatArray___setitem__(*args, **kwargs):
pass
def MFnSubd_swiginit(*args, **kwargs):
pass
def MQuaternion_inverse(*args, **kwargs):
pass
def MFnCamera_className(*args, **kwargs):
pass
def new_MFnCompoundAttribute(*args, **kwargs):
pass
def MFnIntArrayData_length(*args, **kwargs):
pass
def MUuid___eq__(*args, **kwargs):
pass
def MColorArray_swiginit(*args, **kwargs):
pass
def MFnDirectionalLight_swiginit(*args, **kwargs):
pass
def shortPtr_swigregister(*args, **kwargs):
pass
def MFnLambertShader_setRtRefractedColor(*args, **kwargs):
pass
def MFileIO_currentFile(*args, **kwargs):
pass
def new_MFnPartition(*args, **kwargs):
pass
def delete_MItMeshFaceVertex(*args, **kwargs):
pass
def MItMeshVertex_setIndex(*args, **kwargs):
pass
def MTransformationMatrix_scalePivot(*args, **kwargs):
pass
def MFnAttribute_setArray(*args, **kwargs):
pass
def MItDependencyNodes_className(*args, **kwargs):
pass
def MTransformationMatrix_swiginit(*args, **kwargs):
pass
def MFnSubd_vertexIdFromBaseVertexIndex(*args, **kwargs):
pass
def MAttributeIndex_hasValidRange(*args, **kwargs):
pass
def MFnLight_lightAmbient(*args, **kwargs):
pass
def MDataBlock_outputValue(*args, **kwargs):
pass
def MFnAreaLight_swiginit(*args, **kwargs):
pass
def MProfiler_getThreadId(*args, **kwargs):
pass
def MGlobal_mayaState(*args, **kwargs):
pass
def MFnDagNode_usingObjectColor(*args, **kwargs):
pass
def MImage_depthMap(*args, **kwargs):
pass
def MLockMessage_setNodeLockQueryCallback(*args, **kwargs):
pass
def MFnNumericAttribute_className(*args, **kwargs):
pass
def MPointArray_set(*args, **kwargs):
pass
def MCommandResult_className(*args, **kwargs):
pass
def new_MFnUInt64ArrayData(*args, **kwargs):
pass
def MCacheFormatDescription_addChannel(*args, **kwargs):
pass
def MFnExpression_getDefaultObject(*args, **kwargs):
pass
def MItDag_traverseUnderWorld(*args, **kwargs):
pass
def MFnDependencyNode_getAffectedAttributes(*args, **kwargs):
pass
def MFloatPoint_homogenize(*args, **kwargs):
pass
def MVectorArray_copy(*args, **kwargs):
pass
def new_MFnNurbsCurve(*args, **kwargs):
pass
def MDataHandle_asNurbsCurve(*args, **kwargs):
pass
def MColor_g_get(*args, **kwargs):
pass
def MArgDatabase_getObjects(*args, **kwargs):
pass
def MDataHandle_asAngle(*args, **kwargs):
pass
def MFnSpotLight_swiginit(*args, **kwargs):
pass
def MAttributePattern_attrPatternCount(*args, **kwargs):
pass
def MScriptUtil_asShort2Ptr(*args, **kwargs):
pass
def delete_MFloatArray(*args, **kwargs):
pass
def MScriptUtil_setBoolArray(*args, **kwargs):
pass
def new_MFnReference(*args, **kwargs):
pass
def MFnComponent_swiginit(*args, **kwargs):
pass
def MUserData_swigregister(*args, **kwargs):
pass
def MFnSet_removeMember(*args, **kwargs):
pass
def MDoubleArray_get(*args, **kwargs):
pass
def MFnAttribute_addToCategory(*args, **kwargs):
pass
def MEvaluationNode_dirtyPlug(*args, **kwargs):
pass
def new_MFnNurbsSurface(*args, **kwargs):
pass
def MFnNurbsSurface_getUV(*args, **kwargs):
pass
def MFnTransform_setScalePivot(*args, **kwargs):
pass
def MItMeshFaceVertex_faceVertId(*args, **kwargs):
pass
def MTransformationMatrix_rotateTo(*args, **kwargs):
pass
def MIteratorType_getFilterList(*args, **kwargs):
pass
def MTransformationMatrix_setScalePivotTranslation(*args, **kwargs):
pass
def MFnMesh_assignUV(*args, **kwargs):
pass
def delete_MFnSubd(*args, **kwargs):
pass
def MFnLight_setIntensity(*args, **kwargs):
pass
def MDagPath_getPath(*args, **kwargs):
pass
def MQuaternion_getAxisAngle(*args, **kwargs):
pass
def MFnNonExtendedLight_depthMapBias(*args, **kwargs):
pass
def MPoint_x_get(*args, **kwargs):
pass
def MFnDependencyNode_typeId(*args, **kwargs):
pass
def MGlobal_setPreselectionHiliteList(*args, **kwargs):
pass
def MFnDagNode_inModel(*args, **kwargs):
pass
def MFnNurbsSurface_numKnotsInU(*args, **kwargs):
pass
def delete_MMatrix(*args, **kwargs):
pass
def MFnNumericAttribute_getMin(*args, **kwargs):
pass
def MFnMesh_collapseFaces(*args, **kwargs):
pass
def new_MAngle(*args, **kwargs):
pass
def MBoundingBox_expand(*args, **kwargs):
pass
def MFnExpression_create(*args, **kwargs):
pass
def MItSelectionList_setFilter(*args, **kwargs):
pass
def delete_MItDag(*args, **kwargs):
pass
def MPlug_setBool(*args, **kwargs):
pass
def MFloatPoint_setCast(*args, **kwargs):
pass
def MFnVectorArrayData_array(*args, **kwargs):
pass
def MFnNumericData_setData3Short(*args, **kwargs):
pass
def MScriptUtil_createFloatMatrixFromList(*args, **kwargs):
pass
def MColor___imul__(*args, **kwargs):
pass
def MURI_setUserName(*args, **kwargs):
pass
def MArgList_asTime(*args, **kwargs):
pass
def MScriptUtil_getUint4ArrayItem(*args, **kwargs):
pass
def MDataHandle_acceptedTypeIds(*args, **kwargs):
pass
def MFnSpotLight_penumbraAngle(*args, **kwargs):
pass
def MDGModifier_pythonCommandToExecute(*args, **kwargs):
pass
def MAttributeSpec___getitem__(*args, **kwargs):
pass
def MFnIntArrayData_className(*args, **kwargs):
pass
def MTesselationParams_setMaxEdgeLength(*args, **kwargs):
pass
def MFnSubd_edgeIsCreased(*args, **kwargs):
pass
def array3dFloat_set(*args, **kwargs):
pass
def MVectorArray_swigregister(*args, **kwargs):
pass
def MFnCamera_setStereoHITEnabled(*args, **kwargs):
pass
def MFnReflectShader_setReflectedColor(*args, **kwargs):
pass
def MFnMesh_duplicateFaces(*args, **kwargs):
pass
def MFnCamera_swiginit(*args, **kwargs):
pass
def boolPtr_cast(*args, **kwargs):
pass
def MURI_getQueryPairDelimiter(*args, **kwargs):
pass
def delete_shortPtr(*args, **kwargs):
pass
def MDistance_className(*args, **kwargs):
pass
def MFnAttribute_setIndeterminant(*args, **kwargs):
pass
def MFnNurbsSurface_boundaryType(*args, **kwargs):
pass
def MFnTransform_set(*args, **kwargs):
pass
def MItMeshEdge_index(*args, **kwargs):
pass
def MGlobal_className(*args, **kwargs):
pass
def MPlug_name(*args, **kwargs):
pass
def delete_MRichSelection(*args, **kwargs):
pass
def MItDependencyNodes_swigregister(*args, **kwargs):
pass
def MFnMesh_getUVSetsInFamily(*args, **kwargs):
pass
def MFnStringArrayData_swiginit(*args, **kwargs):
pass
def MFnContainerNode_getPublishedNames(*args, **kwargs):
pass
def delete_MSelectionMask(*args, **kwargs):
pass
def MPoint_z_get(*args, **kwargs):
pass
def MFloatVector_z_set(*args, **kwargs):
pass
def MGlobal_selectByName(*args, **kwargs):
pass
def new_MFnDagNode(*args, **kwargs):
pass
def MFnNurbsSurface_formInV(*args, **kwargs):
pass
def MLockMessage_className(*args, **kwargs):
pass
def MFnCamera_setEyePoint(*args, **kwargs):
pass
def MFnMesh_create(*args, **kwargs):
pass
def MAngle_asAngSeconds(*args, **kwargs):
pass
def MDataHandle_setGenericChar(*args, **kwargs):
pass
def MItMeshPolygon_getTriangles(*args, **kwargs):
pass
def delete_MItSubdFace(*args, **kwargs):
pass
def MFileObject_exists(*args, **kwargs):
pass
def new_MItDependencyNodes(*args, **kwargs):
pass
def MFnAttribute_setAffectsAppearance(*args, **kwargs):
pass
def MFloatPointArray_length(*args, **kwargs):
pass
def new_MFnVolumeLight(*args, **kwargs):
pass
def MFnGenericAttribute_removeDataAccept(*args, **kwargs):
pass
def MColorArray_insert(*args, **kwargs):
pass
def MUintArray_swiginit(*args, **kwargs):
pass
def MScriptUtil_getInt3ArrayItem(*args, **kwargs):
pass
def new_MWeight(*args, **kwargs):
pass
def MFnDependencyNode_hasAttribute(*args, **kwargs):
pass
def MEventMessage_getEventNames(*args, **kwargs):
pass
def MFnSingleIndexedComponent_swiginit(*args, **kwargs):
pass
def MFnCameraSet_swigregister(*args, **kwargs):
pass
def MDGModifier_newPlugValueInt(*args, **kwargs):
pass
def MAttributeSpecArray_className(*args, **kwargs):
pass
def MNamespace_addNamespace(*args, **kwargs):
pass
def MTesselationParams_setStdMinEdgeLength(*args, **kwargs):
pass
def MFnCamera_shakeOverscanEnabled(*args, **kwargs):
pass
def MFnLambertShader_setGlowIntensity(*args, **kwargs):
pass
def MTime_asUnits(*args, **kwargs):
pass
def MURI_getScheme(*args, **kwargs):
pass
def MMeshSmoothOptions_setSmoothUVs(*args, **kwargs):
pass
def MFnDagNode_fullPathName(*args, **kwargs):
pass
def MPlug_selectAncestorLogicalIndex(*args, **kwargs):
pass
def MFnNonAmbientLight_swiginit(*args, **kwargs):
pass
def MDistance_setValue(*args, **kwargs):
pass
def MFnAttribute_affectsAppearance(*args, **kwargs):
pass
def MFnAssembly_getAbsoluteRepNamespace(*args, **kwargs):
pass
def MRenderPassRegistry_className(*args, **kwargs):
pass
def MPlugArray_setSizeIncrement(*args, **kwargs):
pass
def delete_intPtr(*args, **kwargs):
pass
def delete_MSpace(*args, **kwargs):
pass
def MAddRemoveAttrEdit_nodeName(*args, **kwargs):
pass
def MColor___ne__(*args, **kwargs):
pass
def MDagPath_apiType(*args, **kwargs):
pass
def MFnPartition_addMember(*args, **kwargs):
pass
def MFnTransform_resetFromRestPosition(*args, **kwargs):
pass
def array3dInt_set(*args, **kwargs):
pass
def MSceneMessage_addNamespaceRenamedCallback(*args, **kwargs):
pass
def MFloatVector___imul__(*args, **kwargs):
pass
def MQuaternion_exp(*args, **kwargs):
pass
def MFnNurbsSurfaceData_swiginit(*args, **kwargs):
pass
def MFnAttribute_setNiceNameOverride(*args, **kwargs):
pass
def MItSurfaceCV_swigregister(*args, **kwargs):
pass
def delete_MFnEnumAttribute(*args, **kwargs):
pass
def MFnMatrixData_transformation(*args, **kwargs):
pass
def MDataHandle_set3Short(*args, **kwargs):
pass
def MItMeshPolygon_isDone(*args, **kwargs):
pass
def MItMeshVertex_getColors(*args, **kwargs):
pass
def MFileObject_setRawName(*args, **kwargs):
pass
def MItDependencyGraph_swiginit(*args, **kwargs):
pass
def MFnUint64SingleIndexedComponent_swiginit(*args, **kwargs):
pass
def MGlobal_swiginit(*args, **kwargs):
pass
def MFnCamera_shutterAngle(*args, **kwargs):
pass
def MFnFloatArrayData_length(*args, **kwargs):
pass
def MArrayDataHandle_set(*args, **kwargs):
pass
def MUintArray_get(*args, **kwargs):
pass
def MInt64Array___repr__(*args, **kwargs):
pass
def MScriptUtil_swigregister(*args, **kwargs):
pass
def MFnSpotLight_create(*args, **kwargs):
pass
def MEulerRotation_setToClosestSolution(*args, **kwargs):
pass
def MFnSet_setAnnotation(*args, **kwargs):
pass
def MDGModifier_renameNode(*args, **kwargs):
pass
def MFnPointLight_className(*args, **kwargs):
pass
def MIntArray___setitem__(*args, **kwargs):
pass
def MFnDagNode_setIntermediateObject(*args, **kwargs):
pass
def MSetAttrEdit_plugName(*args, **kwargs):
pass
def MFnCamera_setAspectRatio(*args, **kwargs):
pass
def MTimeArray_append(*args, **kwargs):
pass
def MObject_hasFn(*args, **kwargs):
pass
def doublePtr_frompointer(*args, **kwargs):
pass
def new_MPointOnMesh(*args, **kwargs):
pass
def MFnPhongEShader_swigregister(*args, **kwargs):
pass
def MFnAnisotropyShader_setRoughness(*args, **kwargs):
pass
def MNodeMessage_addNodeAboutToDeleteCallback(*args, **kwargs):
pass
def MVector___add__(*args, **kwargs):
pass
def MFnDependencyNode_enableDGTiming(*args, **kwargs):
pass
def MFnAssembly_getActive(*args, **kwargs):
pass
def MFnMesh_removeVertexColors(*args, **kwargs):
pass
def MObjectArray_insert(*args, **kwargs):
pass
def MFnMesh_getPoint(*args, **kwargs):
pass
#!/usr/bin/env python
# Copyright (c) 2014-2018 <NAME>, Ph.D.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Input/output: default units are METERS and DEGREES.
boolean deg=True means degrees
For most functions you can input Numpy arrays of any shape, except as noted in the functions.
See tests/Test.py for example uses.
"""
from __future__ import division
from copy import deepcopy
from six import string_types,PY2
from datetime import datetime
try:
import numpy
from numpy import sin, cos, tan, sqrt, radians, arctan2, hypot, degrees
except ImportError:
numpy = None
from math import sin, cos, tan, sqrt, radians, hypot, degrees
from math import atan2 as arctan2
try:
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import Angle,SkyCoord, EarthLocation, AltAz, ICRS
except ImportError:
Time = None
#
from .vallado import vazel2radec, vradec2azel
from .timeconv import str2dt
class EarthEllipsoid:
"""generate reference ellipsoid"""
def __init__(self,model='wgs84'):
if model == 'wgs84':
"""https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84"""
self.a = 6378137. # semi-major axis [m]
self.f = 1 / 298.2572235630 # flattening
self.b = self.a * (1 - self.f) # semi-minor axis
elif model=='grs80':
"""https://en.wikipedia.org/wiki/GRS_80"""
self.a = 6378137. # semi-major axis [m]
self.f = 1 / 298.257222100882711243 # flattening
self.b = self.a * (1 - self.f) # semi-minor axis
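# Example: the semi-minor axis follows from a and f as b = a * (1 - f),
# e.g. EarthEllipsoid('wgs84').b is approximately 6356752.3142 m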
#%% to AER (azimuth, elevation, range)
def ecef2aer(x, y, z, lat0, lon0, h0, ell=None, deg=True):
"""
Observer => Point
input:
-----
x,y,z [meters] target ECEF location [0,Infinity)
lat0, lon0 (degrees/radians) Observer coordinates on ellipsoid [-90,90],[-180,180]
h0 [meters] observer altitude [0,Infinity)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output: AER
------
azimuth, elevation (degrees/radians) [0,360),[0,90]
slant range [meters] [0,Infinity)
"""
xEast, yNorth, zUp = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg)
return enu2aer(xEast, yNorth, zUp, deg=deg)
def eci2aer(eci, lat0, lon0, h0, t):
"""
Observer => Point
input
-----
eci [meters] Nx3 target ECI location (x,y,z) [0,Infinity)
lat0, lon0 (degrees/radians) Observer coordinates on ellipsoid [-90,90],[-180,180]
h0 [meters] observer altitude [0,Infinity)
t time (datetime.datetime) time of observation (UTC)
output: AER
------
azimuth, elevation (degrees/radians) [0,360),[0,90]
slant range [meters] [0,Infinity)
"""
ecef = eci2ecef(eci, t)
return ecef2aer(ecef[:, 0], ecef[:, 1], ecef[:, 2], lat0, lon0, h0)
def enu2aer(e, n, u, deg=True):
"""
Observer => Point
input
-----
e,n,u [meters] East, north, up [0,Infinity)
deg degrees input/output (False: radians in/out)
output: AER
------
azimuth, elevation (degrees/radians) [0,360),[0,90]
slant range [meters] [0,Infinity)
"""
r = hypot(e, n)
slantRange = hypot(r, u)
elev = arctan2(u, r)
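    # arctan2(0, -1) == pi, so the modulo below wraps azimuth into [0, 2*pi)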
az = arctan2(e, n) % (2 * arctan2(0, -1))
if deg:
return degrees(az), degrees(elev), slantRange
else:
return az, elev, slantRange # radians
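def _example_enu2aer():
    # Illustrative sanity check (added; not part of the original module):
    # a unit vector pointing due East maps to azimuth 90 deg, elevation 0,
    # slant range 1.
    az, el, srange = enu2aer(1.0, 0.0, 0.0)
    assert abs(az - 90.0) < 1e-9
    assert abs(el) < 1e-9
    assert abs(srange - 1.0) < 1e-12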
def geodetic2aer(lat, lon, h, lat0, lon0, h0, ell=None, deg=True):
"""
Observer => Point
input:
-----
Target: lat, lon, h (altitude, meters)
Observer: lat0, lon0, h0 (altitude, meters)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output: AER
------
azimuth, elevation (degrees/radians)
slant range [meters]
"""
e, n, u = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
return enu2aer(e, n, u, deg=deg)
def ned2aer(n, e, d, deg=True):
"""
Observer => Point
input
-----
n,e,d [meters] North,east, down [0,Infinity)
deg degrees input/output (False: radians in/out)
output: AER
------
azimuth, elevation (degrees/radians) [0,360),[0,90]
slant range [meters] [0,Infinity)
"""
return enu2aer(e, n, -d, deg=deg)
#%% to ECEF
def aer2ecef(az, el, srange, lat0, lon0, alt0, ell=None, deg=True):
"""
convert target azimuth, elevation, range (meters) from observer at lat0,lon0,alt0 to ECEF coordinates.
Input:
-----
azimuth, elevation (degrees/radians) [0,360),[0,90]
slant range [meters] [0,Infinity)
Observer: lat0, lon0, h0 (altitude, meters)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output: ECEF x,y,z [meters]
if you specify NaN for srange, return value z will be NaN
"""
# Origin of the local system in geocentric coordinates.
x0, y0, z0 = geodetic2ecef(lat0, lon0, alt0, ell, deg=deg)
# Convert Local Spherical AER to ENU
e1, n1, u1 = aer2enu(az, el, srange, deg=deg)
# Rotating ENU to ECEF
dx, dy, dz = _enu2uvw(e1, n1, u1, lat0, lon0, deg=deg)
# Origin + offset from origin equals position in ECEF
return x0 + dx, y0 + dy, z0 + dz
def eci2ecef(eci, t):
"""
Observer => Point
input
-----
eci [meters] Nx3 target ECI location (x,y,z) [0,Infinity)
t time (datetime.datetime) time of observation (UTC)
output
------
x,y,z [meters] target ECEF location [0,Infinity)
"""
if numpy is None or Time is None:
raise ImportError('eci2ecef requires Numpy and AstroPy')
t = numpy.atleast_1d(t)
if isinstance(t[0], string_types): # don't convert blindly in case it's already a float
t = str2dt(t)
if isinstance(t[0], datetime):
gst = Time(t).sidereal_time('apparent', 'greenwich').radian
elif isinstance(t[0],float):
gst = t
else:
raise TypeError('eci2ecef: time must be datetime or radian float')
assert isinstance(gst[0], float) # must be in radians!
eci = numpy.atleast_2d(eci)
N, trip = eci.shape
if eci.ndim > 2 or trip != 3:
raise ValueError('eci triplets must be shape (N,3)')
"""ported from:
https://github.com/dinkelk/astrodynamics/blob/master/rot3.m
"""
ecef = numpy.empty_like(eci)
for i in range(N):
#ecef[i, :] = _rottrip(gst[i]) @ eci[i, :]
ecef[i, :] = _rottrip(gst[i]).dot(eci[i, :])
return ecef
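# Note (assumption, added): _rottrip(gst) is taken to be the 3x3 rotation
# about +Z by the Greenwich sidereal angle, so ECI -> ECEF multiplies by
# _rottrip(gst) and ECEF -> ECI by its transpose.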
def enu2ecef(e1, n1, u1, lat0, lon0, h0, ell=None, deg=True):
"""
Observer => Point
inputs:
e1, n1, u1 (meters) east, north, up
observer: lat0, lon0, h0 (degrees/radians,degrees/radians, meters)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output
------
x,y,z [meters] target ECEF location [0,Infinity)
"""
x0, y0, z0 = geodetic2ecef(lat0, lon0, h0, ell, deg=deg)
dx, dy, dz = _enu2uvw(e1, n1, u1, lat0, lon0, deg=deg)
return x0 + dx, y0 + dy, z0 + dz
def geodetic2ecef(lat, lon, alt, ell=None, deg=True):
"""
Observer => Point
input:
-----
Target: lat, lon, h (altitude, meters)
Observer: lat0, lon0, h0 (altitude, meters)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output: ECEF x,y,z (meters)
"""
if ell is None:
ell = EarthEllipsoid()
if deg:
lat = radians(lat)
lon = radians(lon)
# radius of curvature of the prime vertical section
N = get_radius_normal(lat, ell)
# Compute cartesian (geocentric) coordinates given (curvilinear) geodetic
# coordinates.
x = (N + alt) * cos(lat) * cos(lon)
y = (N + alt) * cos(lat) * sin(lon)
z = (N * (ell.b / ell.a)**2 + alt) * sin(lat)
return x, y, z
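def _example_geodetic2ecef():
    # Illustrative sanity check (added): at lat=lon=alt=0 the prime-vertical
    # radius N equals the semi-major axis, so the point sits at (a, 0, 0).
    x, y, z = geodetic2ecef(0.0, 0.0, 0.0)
    assert abs(x - 6378137.0) < 1e-6 and y == 0.0 and z == 0.0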
def ned2ecef(n, e, d, lat0, lon0, h0, ell=None, deg=True):
"""
Observer => Point
input
-----
n,e,d [meters] North,east, down [0,Infinity)
Observer: lat0, lon0, h0 (altitude, meters)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output:
------
ECEF x,y,z (meters)
"""
return enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
#%% to ECI
def aer2eci(az, el, srange, lat0, lon0, h0, t, ell=None, deg=True):
"""
input
-----
azimuth, elevation (degrees/radians) [0,360),[0,90]
slant range [meters] [0,Infinity)
Observer: lat0, lon0, h0 (altitude, meters)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
t datetime.datetime of observation
output
------
eci x,y,z (meters)
"""
if numpy is None:
raise ImportError('aer2eci requires Numpy')
x, y, z = aer2ecef(az, el, srange, lat0, lon0, h0, ell, deg)
return ecef2eci(numpy.column_stack((x, y, z)), t)
def ecef2eci(ecef, t):
"""
Point => Point
input
-----
ecef: Nx3 x,y,z (meters)
t: datetime.datetime
output
------
eci x,y,z (meters)
"""
if Time is None or numpy is None:
raise ImportError('ecef2eci requires Numpy and AstroPy')
t = numpy.atleast_1d(t)
    if isinstance(t[0], string_types):  # parse time strings first
        t = str2dt(t)
    if isinstance(t[0], datetime):
        gst = Time(t).sidereal_time('apparent', 'greenwich').radian
    elif isinstance(t[0], float):
        gst = t
    else:
        raise TypeError('ecef2eci: time must be datetime or radian float')
    ecef = numpy.atleast_2d(ecef)
    N, trip = ecef.shape
    if ecef.ndim > 2 or trip != 3:
        raise ValueError('ecef triplets must be shape (N,3)')
    # body reconstructed by mirroring eci2ecef above: the inverse rotation is
    # the transpose of the ECI -> ECEF rotation matrix
    eci = numpy.empty_like(ecef)
    for i in range(N):
        eci[i, :] = _rottrip(gst[i]).T.dot(ecef[i, :])
    return eci
"""
Miscellaneous and sundry plotting functions to please your visual cortex
"""
import typing
import warnings
from pathlib import Path
from typing import Dict, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xarray as xr
from matplotlib import cm, colors, gridspec, image, transforms
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import stats
from skimage.measure import label, regionprops
from statsmodels.stats.weightstats import DescrStatsW
from tqdm.auto import tqdm
from pharedox import constants
from pharedox import data_analysis as da
def imshow_r_stack(
imgs: xr.DataArray,
profile_data: xr.DataArray,
output_dir: Union[str, Path],
per_animal_cmap: bool = True,
fl_wvl: str = "410",
cmap: str = "coolwarm",
width: int = 80,
height: int = 30,
colorbar=True,
):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
center = (np.array(imgs.shape[-2:]) / 2).astype(int)  # np.int is deprecated
wpad = int(width / 2)
hpad = int(height / 2)
for tp in tqdm(imgs.timepoint.values, leave=False, desc="timepoint"):
for pair in tqdm(imgs.pair.values, leave=False, desc="pair"):
filepath = output_dir.joinpath(f"timepoint={tp}_pair={pair}.pdf")
with PdfPages(filepath) as pdf:
i = 0
for animal in tqdm(imgs.animal.values, desc="animal", leave=False):
fig, ax = plt.subplots()
selector = dict(animal=animal, timepoint=tp, pair=pair)
im, cbar = imshow_ratio_normed(
imgs.sel(wavelength="r", **selector),
imgs.sel(wavelength=fl_wvl, **selector),
profile_data=profile_data.sel(wavelength="r", **selector),
prob=0.999,
colorbar=colorbar,
i_min=0,
i_max=3000,
cmap=cmap,
ax=ax,
)
ax.set_xlim(center[1] - wpad, center[1] + wpad)
ax.set_ylim(center[0] - hpad, center[0] + hpad)
ax.set_title(str(selector))
pdf.savefig()
if (i % 20) == 0:
plt.close("all")
i += 1
def generate_wvl_pair_timepoint_profile_plots(data: xr.DataArray, ignored_wvls=None):
"""
For each wavelength and pair in the given data, this function plots a line plot with
each color representing a unique strain. The line is the mean value across animals
for that strain, and the shaded regions are the 95% confidence intervals
Parameters
----------
data
ignored_wvls
"""
if ignored_wvls is None:
ignored_wvls = ["TL"]
strains = np.unique(data.strain.values)
cmap = plt.get_cmap("Set2")
colormap = dict(zip(strains, cmap.colors))
wvls = list(map(lambda x: x.lower(), data.wavelength.values))
for wvl in ignored_wvls:
try:
wvls.remove(wvl.lower())
except ValueError:
continue
for wvl in wvls:
for pair in data.pair.values:
for tp in data.timepoint.values:
fig, ax = plt.subplots()
for strain in strains:
strain_data = data.where(data["strain"] == strain, drop=True)
ax.plot(
strain_data.sel(wavelength=wvl, pair=pair, timepoint=tp).T,
color=colormap[strain],
alpha=0.5,
)
title = f"wavelength = {wvl} ; pair = {pair} ; timepoint = {tp}"
ax.set_title(title)
ax.legend(
[
plt.Line2D([0], [0], color=color, lw=4)
for color in cmap.colors[: len(strains)]
],
strains,
)
yield title, fig
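def _example_save_profile_plots(data, path="profiles.pdf"):
    # Illustrative helper (added; not part of the original module): the
    # generator above yields (title, figure) pairs, which are typically
    # drained into a multi-page PDF.
    with PdfPages(path) as pdf:
        for _title, fig in generate_wvl_pair_timepoint_profile_plots(data):
            pdf.savefig(fig)
            plt.close(fig)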
def generate_avg_wvl_pair_profile_plots(
data: xr.DataArray, ignored_wvls: typing.List[str] = None
):
"""
For each wavelength and pair in the given data, this function plots a line plot with
each color representing a unique strain. The line is the mean value across animals
for that strain, and the shaded regions are the 95% confidence intervals
Parameters
----------
ignored_wvls
data : [type]
[description]
"""
if ignored_wvls is None:
ignored_wvls = ["TL"]
strains = np.unique(data.strain.values)
cmap = plt.get_cmap("Set2")
colormap = dict(zip(strains, cmap.colors))
wvls = list(map(lambda x: x.lower(), data.wavelength.values))
for wvl in ignored_wvls:
try:
wvls.remove(wvl.lower())
except ValueError:
continue
for wvl in wvls:
for pair in data.pair.values:
for tp in data.timepoint.values:
fig, ax = plt.subplots()
for strain in np.unique(data.strain.values):
strain_data = data.where(data["strain"] == strain, drop=True)
plot_profile_avg_with_bounds(
strain_data.sel(wavelength=wvl, pair=pair, timepoint=tp),
label=strain,
ax=ax,
color=colormap[strain],
)
title = f"wavelength = {wvl} ; pair = {pair} ; timepoint = {tp}"
ax.set_title(title)
ax.legend()
yield title, fig
def plot_err_with_region_summaries(
data: xr.DataArray,
measure_regions: Dict,
display_regions=None,
ax=None,
profile_color="black",
label=None,
):
st_color = "k"
mv_color = "tab:red"
if ax is None:
_, ax = plt.subplots()
if display_regions is None:
display_regions = measure_regions
df = da.fold_v_point_table(data, measure_regions)
df_avgs = df.reset_index().groupby("region").agg(["mean", "sem"]).reset_index()
xs = np.linspace(0, 1, data.position.size)
plot_profile_avg_with_sem_bounds(
100 * da.fold_error(data), xs=xs, ax=ax, color=profile_color, label=label
)
for region, region_err_mean, region_err_sem in zip(
df_avgs["region"],
df_avgs["fold_error_region"][1]["mean"],
df_avgs["fold_error_region"][1]["sem"],
):
try:
ax.axhline(
100 * region_err_mean,
*display_regions[region],
color=profile_color,
alpha=1,
lw=2,
solid_capstyle="butt",
)
ax.errorbar(
x=np.mean(display_regions[region]),
y=100 * region_err_mean,
yerr=100 * region_err_sem,
color=profile_color,
elinewidth=0.5,
capsize=1,
capthick=0.5,
)
except KeyError:  # region missing from display_regions
continue
ax.set_xlim(0, 1)
add_regions_to_axis(
ax, display_regions, alpha=0.3, hide_labels=True, skip=["medial_axis"]
)
ax.set_xlabel("position along midline")
def plot_stage_layout(
image_data: xr.DataArray, pair: int = 0
) -> sns.axisgrid.FacetGrid:
"""
Shows a scatter plot where each point is an animal located on the imaging stage and
the points are colored by strain.
A useful visualization to make sure that the strain map is accurate.
.. image:: _static/plot_stage_layout.png
Parameters
----------
image_data : xr.DataArray
The image data acquired by metamorph.
pair : int
The image pair to display
Returns
-------
seaborn.axisgrid.FacetGrid
The grid object returned by seaborns's lmplot
See Also
--------
io.load_tiff_as_hyperstack
seaborn.lmplot
"""
df = pd.DataFrame(
dict(
stage_x=image_data.sel(wavelength="410", pair=pair).stage_x,
stage_y=image_data.sel(wavelength="410", pair=pair).stage_y,
strain=image_data.sel(wavelength="410", pair=pair).strain,
)
)
return sns.lmplot(x="stage_x", y="stage_y", data=df, hue="strain", fit_reg=False)
def ecdf_(data):
"""Compute ECDF"""
x = np.sort(data)
n = x.size
y = np.arange(1, n + 1) / n
return x, y
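# Worked example (added): ecdf_([3, 1, 2]) returns
# (array([1, 2, 3]), array([0.3333..., 0.6666..., 1.0]))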
def cdf_plot(data, *args, **kwargs):
"""
Plot a CDF, compatible with Seaborn's FacetGrid
data
1-D vector of numbers to plot the CDF of
*args
ignored
**kwargs
keyword arguments passed onto ``plt.step``
"""
x, y = ecdf_(data)
plt.step(x, y, **kwargs)
def add_regions_to_axis(
ax,
regions: dict,
skip=None,
label_dist_bottom_percent: float = 0.03,
label_x_offset_percent: float = 0.005,
alpha: float = 0.03,
hide_labels: bool = False,
xs=None,
color="black",
**kwargs,
):
"""
TODO: Documentation
Parameters
----------
ax
the axis to add the regions to
regions
the region dictionary, formatted as such::
{
'pm3': [1, 10],
'pm4': [12, 30],
...
}
skip
the regions to skip plotting
label_dist_bottom_percent
the distance from the bottom of the axis that the region labels should be placed, expressed as a percentage of the axis height
label_x_offset_percent
the distance from the left of the region annotation, expressed as a percentage of the axis length
alpha
the opacity of the region annotations (0 = transparent, 1=opaque)
hide_labels
if True, does not add labels to regions
kwargs
these will be passed onto ``ax.axvspan``
"""
if skip is None:
skip = []
min_y, max_y = ax.get_ylim()
min_x, max_x = ax.get_xlim()
text_y = ((max_y - min_y) * label_dist_bottom_percent) + min_y
text_x_offset = (max_x - min_x) * label_x_offset_percent
for region, bounds in regions.items():
if region in skip:
continue
ax.axvspan(
bounds[0], bounds[1], alpha=alpha, color=color, linewidth=0, **kwargs
)
if not hide_labels:
ax.annotate(region, xy=(bounds[0] + text_x_offset, text_y))
def add_region_bars_to_axis(
ax, regions, skip=None, bar_height=8, bar_width=1, fontsize=3
):
if skip is None:
skip = []
for region, region_bounds in regions.items():
if region in skip:
continue
yy = -0.01
ax.annotate(
"",
xy=(region_bounds[0], yy),
xycoords=("data", "axes fraction"),
xytext=(region_bounds[1], yy),
textcoords=("data", "axes fraction"),
arrowprops=dict(
arrowstyle="-",
connectionstyle=f"bar,armA=-{bar_height},armB=-{bar_height},fraction=0.0",
capstyle="butt",
joinstyle="miter",
lw=bar_width,
),
annotation_clip=False,
)
ax.annotate(
region,
xy=((region_bounds[0] + region_bounds[1]) / 2, yy - 0.08),
xycoords=("data", "axes fraction"),
ha="center",
fontsize=fontsize,
)
ax.xaxis.labelpad = 25
def plot_profile_avg_with_bounds(
data,
ax=None,
confint_alpha=0.05,
label=None,
xs=None,
axis=0,
bounds: str = "ci",
**kwargs,
):
"""
TODO: Documentation
Parameters
----------
data
ax
confint_alpha
label
kwargs
Returns
-------
"""
with np.errstate(invalid="ignore"):
        mean = np.nanmean(data, axis=axis)
        sem = stats.sem(data, axis=axis, nan_policy="omit")
bounds_map = {
"ci": DescrStatsW(data).tconfint_mean(alpha=confint_alpha),
"sem": (mean - sem, mean + sem),
}
if ax is None:
ax = plt.gca()
if xs is None:
try:
# if the data is an xr.DataArray
xs = data.position
except AttributeError:
# if it's a numpy array
xs = np.arange(len(data))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax.plot(xs, np.nanmean(data, axis=axis), label=label, **kwargs)
lower, upper = bounds_map[bounds]
kwargs.pop("linestyle", None)
kwargs.pop("linewidth", None)
kwargs.pop("lw", None)
ax.fill_between(xs, lower, upper, alpha=0.3, lw=0, **kwargs)
return ax
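def _example_plot_profile_avg(data):
    # Illustrative usage (added): plot the mean profile with SEM bounds
    # instead of the default 95% confidence interval.
    ax = plot_profile_avg_with_bounds(data, bounds="sem", color="tab:blue")
    ax.set_xlabel("position along midline")
    return ax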
def imgs_to_rgb(
imgs,
r_min,
r_max,
cmap="coolwarm",
i_min=0,
i_max=None,
i_wvls=["410", "470"],
ratio_numerator="410",
ratio_denominator="470",
):
if i_max is None:
i_max = np.max(imgs.sel(wavelength=i_wvls))
try:
R = imgs.sel(wavelength="R")
except KeyError:
R = imgs.sel(wavelength=ratio_numerator) / imgs.sel(
wavelength=ratio_denominator
)
norm_ratio = colors.Normalize(vmin=r_min, vmax=r_max)
cmap = cm.get_cmap(cmap)
img_rgba = cmap(norm_ratio(R))
norm_fl = colors.Normalize(vmin=i_min, vmax=i_max, clip=True)
hsv_img = colors.rgb_to_hsv(img_rgba[..., :3]) # ignore the "alpha" channel
hsv_img[..., -1] = norm_fl(imgs.sel(wavelength=i_wvls).max(dim="wavelength"))
img_rgba = colors.hsv_to_rgb(hsv_img)
return img_rgba
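# Hedged usage note (added): the returned (..., 3) RGB array can be displayed
# directly, e.g. plt.imshow(imgs_to_rgb(imgs, r_min=0.8, r_max=1.6)); the
# r_min/r_max values here are arbitrary.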
def imshow_ratio_normed(
ratio_img,
fl_img,
profile_data=None,
prob=0.999,
cmap="coolwarm",
r_min=None,
r_max=None,
i_min=0,
i_max=None,
clip=True,
ax=None,
colorbar=False,
colorbar_kwargs_dict={},
**imshow_kwargs,
):
"""
Show the given ratio image, first converting to HSV and setting the "V" (value)
channel to be the given (normalized) intensity image
Parameters
----------
ratio_img
the ratio image to display
fl_img
the fluorescent intensity image with which to "value-correct" the ratio image.
A good choice here is the max value of both intensity channels used in the
ratio.
profile_data
the midline profile data corresponding to the ratio image. This is used to
center and to choose min/max values for the ratio colormap.
prob
The "confidence interval" around the center of the ratio values to include in
the colormap range
"""
#!/usr/bin/python3
r'''Triangulation uncertainty quantification test
We look at the triangulated position computed from a pixel observation in two
cameras. Calibration-time noise and triangulation-time noise both affect the
accuracy of the triangulated result. This tool samples both of these noise
sources to make sure the analytical uncertainty predictions are correct
'''
import sys
import argparse
import re
import os
def parse_args():
parser = \
argparse.ArgumentParser(description = __doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--fixed',
type=str,
choices=('cam0','frames'),
default = 'cam0',
help='''Are we putting the origin at camera0, or are all the frames at a fixed (and
non-optimizable) pose? One or the other is required.''')
parser.add_argument('--model',
type=str,
choices=('opencv4','opencv8','splined'),
default = 'opencv4',
help='''Which lens model we're using. Must be one of
('opencv4','opencv8','splined')''')
parser.add_argument('--Nframes',
type=int,
default=50,
help='''How many chessboard poses to simulate. These are dense observations: every
camera sees every corner of every chessboard pose''')
parser.add_argument('--Nsamples',
type=int,
default=500,
help='''How many random samples to evaluate''')
parser.add_argument('--Ncameras',
type = int,
default = 2,
help='''How many calibration-time cameras to simulate.
We will use 2 of these for triangulation, selected with
--cameras''')
parser.add_argument('--cameras',
type = int,
nargs = 2,
default = (0,1),
help='''Which cameras we're using for the triangulation.
These need to be different, and in [0,Ncameras-1]. The
vanilla case will have Ncameras=2, so the default value
for this argument (0,1) is correct''')
parser.add_argument('--do-sample',
action='store_true',
help='''By default we don't run the time-intensive
samples of the calibration solves. This runs a very
limited set of tests, and exits. To perform the full set
of tests, pass --do-sample''')
parser.add_argument('--stabilize-coords',
action = 'store_true',
help='''Whether we report the triangulation in the camera-0 coordinate system (which
is moving due to noise) or in a stabilized coordinate
system based on the frame poses''')
parser.add_argument('--cull-left-of-center',
action = 'store_true',
help='''If given, the calibration data in the left half of the imager is thrown
out''')
parser.add_argument('--q-calibration-stdev',
type = float,
default = 0.0,
help='''The observed_pixel_uncertainty of the chessboard
observations at calibration time. Defaults to 0.0. At
least one of --q-calibration-stdev and
--q-observation-stdev MUST be given as > 0''')
parser.add_argument('--q-observation-stdev',
type = float,
default = 0.0,
help='''The observed_pixel_uncertainty of the point
observations at triangulation time. Defaults to 0.0. At
least one of --q-calibration-stdev and
--q-observation-stdev MUST be given as > 0''')
parser.add_argument('--q-observation-stdev-correlation',
type = float,
default = 0.0,
help='''By default, the noise in the observation-time
pixel observations is assumed independent. This isn't
entirely realistic: observations of the same feature in
multiple cameras originate from an imager correlation
operation, so they will have some amount of correlation.
If given, this argument specifies how much correlation.
This is a value in [0,1] scaling the stdev. 0 means
"independent" (the default). 1.0 means "100%%
correlated".''')
parser.add_argument('--baseline',
type = float,
default = 2.,
help='''The baseline of the camera pair. This is the
horizontal distance between each pair of adjacent
cameras''')
parser.add_argument('--observed-point',
type = float,
nargs = 3,
required = True,
help='''The world coordinate of the observed point. Usually this will be ~(small, 0,
large). The code will evaluate two points together: the
one passed here, and the same one with a negated x
coordinate''')
parser.add_argument('--cache',
type=str,
choices=('read','write'),
help=f'''A cache file stores the recalibration results;
computing these can take a long time. This option allows
us to read or write the cache instead of sampling. The
cache filename is hardcoded (in /tmp). By default,
we do neither: we don't read the cache (we sample
instead), and we do not write it to disk when we're
done. This option is useful for tests where we reprocess
the same scenario repeatedly''')
parser.add_argument('--make-documentation-plots',
type=str,
help='''If given, we produce plots for the
documentation. Takes one argument: a string describing
this test. This will be used in the filenames and titles
of the resulting plots. Leading directories will be
used; whitespace and funny characters in the filename
are allowed: will be replaced with _. To make
interactive plots, pass ""''')
parser.add_argument('--ellipse-plot-radius',
type=float,
help='''By default, the ellipse plots autoscale to show the data and the ellipses
nicely. But that means that plots aren't comparable
between runs. This option can be passed to select a
constant plot width, which allows such comparisons''')
parser.add_argument('--terminal-pdf',
type=str,
help='''The gnuplotlib terminal for --make-documentation-plots .PDFs. Omit this
unless you know what you're doing''')
parser.add_argument('--terminal-svg',
type=str,
help='''The gnuplotlib terminal for --make-documentation-plots .SVGs. Omit this
unless you know what you're doing''')
parser.add_argument('--terminal-png',
type=str,
help='''The gnuplotlib terminal for --make-documentation-plots .PNGs. Omit this
unless you know what you're doing''')
parser.add_argument('--explore',
action='store_true',
help='''If given, we drop into a REPL at the end''')
args = parser.parse_args()
if args.Ncameras < 2:
raise Exception("--Ncameras must be given at least 2 cameras")
if args.cameras[0] == args.cameras[1]:
raise Exception("--cameras must select two different cameras")
if args.cameras[0] < 0 or args.cameras[0] >= args.Ncameras:
raise Exception("--cameras must select two different cameras, each in [0,Ncameras-1]")
if args.cameras[1] < 0 or args.cameras[1] >= args.Ncameras:
raise Exception("--cameras must select two different cameras, each in [0,Ncameras-1]")
if args.q_calibration_stdev <= 0.0 and \
args.q_observation_stdev <= 0.0:
raise Exception('At least one of --q-calibration-stdev and --q-observation-stdev MUST be given as > 0')
return args
args = parse_args()
terminal = dict(pdf = args.terminal_pdf,
svg = args.terminal_svg,
png = args.terminal_png,
gp = 'gp')
pointscale = dict(pdf = 1,
svg = 1,
png = 1,
gp = 1)
pointscale[""] = 1.
def shorter_terminal(t):
# Adjust the terminal string to be less tall. Makes the multiplots look
# better: less wasted space
m = re.match("(.*)( size.*?,)([0-9.]+)(.*?)$", t)
if m is None: return t
return m.group(1) + m.group(2) + str(float(m.group(3))*0.8) + m.group(4)
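# Worked example (added): shorter_terminal('pdf size 8in,6in noenhanced')
# returns 'pdf size 8in,4.8in noenhanced' (height scaled by 0.8)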
if args.make_documentation_plots:
d,f = os.path.split(args.make_documentation_plots)
args.make_documentation_plots_extratitle = f
args.make_documentation_plots_path = os.path.join(d, re.sub(r"[^0-9a-zA-Z_\.\-]", "_", f))
print(f"Will write documentation plots to {args.make_documentation_plots_path}-xxxx.pdf and .png and .svg")
if terminal['svg'] is None: terminal['svg'] = 'svg size 800,600 noenhanced solid dynamic font ",14"'
if terminal['pdf'] is None: terminal['pdf'] = 'pdf size 8in,6in noenhanced solid color font ",12"'
if terminal['png'] is None: terminal['png'] = 'pngcairo size 1024,768 transparent noenhanced crop font ",12"'
else:
args.make_documentation_plots_extratitle = None
extraset = dict()
for k in pointscale.keys():
extraset[k] = f'pointsize {pointscale[k]}'
testdir = os.path.dirname(os.path.realpath(__file__))
# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils
import copy
import numpy as np
import numpysane as nps
import pickle
from test_calibration_helpers import calibration_baseline,calibration_sample,grad
############# Set up my world, and compute all the perfect positions, pixel
############# observations of everything
fixedframes = (args.fixed == 'frames')
object_spacing = 0.1
object_width_n = 10
object_height_n = 9
calobject_warp_true = np.array((0.002, -0.005))
# I want the RNG to be deterministic
np.random.seed(0)
extrinsics_rt_fromref_true = np.zeros((args.Ncameras,6), dtype=float)
extrinsics_rt_fromref_true[:,:3] = np.random.randn(args.Ncameras,3) * 0.1
extrinsics_rt_fromref_true[:, 3] = args.baseline * np.arange(args.Ncameras)
extrinsics_rt_fromref_true[:,4:] = np.random.randn(args.Ncameras,2) * 0.1
# cam0 is at the identity. This makes my life easy: I can assume that the
# optimization_inputs returned by calibration_baseline() use the same ref
# coordinate system as these transformations. I explicitly state this by passing
# calibration_baseline(allow_nonidentity_cam0_transform=False) later
extrinsics_rt_fromref_true[0] *= 0
# shape (Npoints,3)
p_triangulated_true0 = np.array((args.observed_point,
args.observed_point),
dtype=float)
# first point has x<0
p_triangulated_true0[0,0] = -np.abs(p_triangulated_true0[0,0])
# second point is the same, but with a negated x: x>0
p_triangulated_true0[1,0] = -p_triangulated_true0[0,0]
Npoints = p_triangulated_true0.shape[0]
@nps.broadcast_define( (('Nintrinsics',),('Nintrinsics',),
(6,),(6,),(6,),
('Nframes',6), ('Nframes',6),
('Npoints',2,2)),
('Npoints',3))
def triangulate_nograd( intrinsics_data0, intrinsics_data1,
rt_cam0_ref, rt_cam0_ref_baseline, rt_cam1_ref,
rt_ref_frame,
rt_ref_frame_baseline,
q,
lensmodel,
stabilize_coords = True):
q = nps.atleast_dims(q,-3)
rt01 = mrcal.compose_rt(rt_cam0_ref,
mrcal.invert_rt(rt_cam1_ref))
# all the v have shape (...,3)
vlocal0 = \
mrcal.unproject(q[...,0,:],
lensmodel, intrinsics_data0)
vlocal1 = \
mrcal.unproject(q[...,1,:],
lensmodel, intrinsics_data1)
v0 = vlocal0
v1 = \
mrcal.rotate_point_r(rt01[:3], vlocal1)
# The triangulated point in the perturbed camera-0 coordinate system.
# Calibration-time perturbations move this coordinate system, so to get
# a better estimate of the triangulation uncertainty, we try to
# transform this to the original camera-0 coordinate system; the
# stabilization path below does that.
#
# shape (..., 3)
p_triangulated0 = \
mrcal.triangulate_leecivera_mid2(v0, v1, rt01[3:])
if not stabilize_coords:
return p_triangulated0
# Stabilization path. This uses the "true" solution, so I cannot do
# this in the field. But I CAN do this in the randomized trials in
# the test. And I can use the gradients to propagate the uncertainty
# of this computation in the field
#
# Data flow:
# point_cam_perturbed -> point_ref_perturbed -> point_frames
# point_frames -> point_ref_baseline -> point_cam_baseline
p_cam0_perturbed = p_triangulated0
p_ref_perturbed = mrcal.transform_point_rt(rt_cam0_ref, p_cam0_perturbed,
                                               inverted = True)
but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018-2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Deep Q Network
"""
import argparse
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from torch import optim as optim
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from pl_bolts.datamodules.experience_source import Experience, ExperienceSourceDataset
from pl_bolts.losses.rl import dqn_loss
from pl_bolts.models.rl.common.gym_wrappers import make_environment
from pl_bolts.models.rl.common.memory import MultiStepBuffer
from pl_bolts.models.rl.common.networks import CNN
from gym import Env
from abc import ABC
from typing import List
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from functools import partial
import collections
import torchfunc
class Agent(ABC):
"""Basic agent that always returns 0"""
def __init__(self, net: nn.Module):
self.net = net
def __call__(self, state: torch.Tensor, device: str, *args, **kwargs) -> List[int]:
"""
Using the given network, decide what action to carry
Args:
state: current state of the environment
device: device used for current batch
Returns:
action
"""
return [0]
class ValueAgent(Agent):
"""Value based agent that returns an action based on the Q values from the network"""
def __init__(
self,
net: nn.Module,
action_space: int,
eps_start: float = 1.0,
eps_end: float = 0.2,
eps_frames: float = 1000,
):
super().__init__(net)
self.action_space = action_space
self.eps_start = eps_start
self.epsilon = eps_start
self.eps_end = eps_end
self.eps_frames = eps_frames
self.recorder=torchfunc.hooks.recorders.ForwardPre()
self.recorder.modules(self.net)
@torch.no_grad()
def __call__(self, state: torch.Tensor, device: str) -> List[int]:
"""
Takes in the current state and returns the action based on the agents policy
Args:
state: current state of the environment
device: the device used for the current batch
Returns:
action defined by policy
"""
if not isinstance(state, list):
state = [state]
if np.random.random() < self.epsilon:
action = self.get_random_action(state)
else:
action = self.get_action(state, device)
return action
def get_random_action(self, state: torch.Tensor) -> int:
"""returns a random action"""
actions = []
for i in range(len(state)):
action = np.random.randint(0, self.action_space)
actions.append(action)
return actions
def get_action(self, state: torch.Tensor, device: torch.device):
"""
Returns the best action based on the Q values of the network
Args:
state: current state of the environment
device: the device used for the current batch
Returns:
action defined by Q values
"""
if not isinstance(state, torch.Tensor):
state = torch.tensor(state, device=device)
q_values = self.net(state)
_, actions = torch.max(q_values, dim=1)
return actions.detach().cpu().numpy()
def update_epsilon(self, step: int) -> None:
"""
Updates the epsilon value based on the current step
Args:
step: current global step
"""
self.epsilon = max(self.eps_end, self.eps_start - (step + 1) / self.eps_frames)
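# Worked example (added): with eps_start=1.0, eps_end=0.02, eps_frames=1000,
# epsilon decays linearly: step 0 -> 0.999, step 499 -> 0.5, and it clamps
# at 0.02 from step 979 onward.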
class DQN(pl.LightningModule):
"""
Basic DQN Model
PyTorch Lightning implementation of `DQN <https://arxiv.org/abs/1312.5602>`_
Paper authors: <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>.
Model implemented by:
- `<NAME> <https://github.com/djbyrne>`
Example:
>>> from pl_bolts.models.rl.dqn_model import DQN
...
>>> model = DQN("PongNoFrameskip-v4")
Train::
trainer = Trainer()
trainer.fit(model)
Note:
This example is based on:
https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition/blob/master/Chapter06/02_dqn_pong.py
Note:
Currently only supports CPU and single GPU training with `distributed_backend=dp`
"""
def __init__(
self,
env: str,
eps_start: float = 1.0,
eps_end: float = 0.02,
eps_last_frame: int = 150000,
sync_rate: int = 1000,
gamma: float = 0.99,
learning_rate: float = 1e-4,
batch_size: int = 32,
replay_size: int = 100000,
warm_start_size: int = 10000,
avg_reward_len: int = 100,
min_episode_reward: int = -21,
seed: int = 123,
batches_per_epoch: int = 1000,
n_steps: int = 1,
**kwargs,
):
"""
Args:
env: gym environment tag
eps_start: starting value of epsilon for the epsilon-greedy exploration
eps_end: final value of epsilon for the epsilon-greedy exploration
eps_last_frame: the final frame for the decrease of epsilon. At this frame epsilon = eps_end
sync_rate: the number of iterations between syncing up the target network with the train network
gamma: discount factor
learning_rate: learning rate
batch_size: size of minibatch pulled from the DataLoader
replay_size: total capacity of the replay buffer
warm_start_size: how many random steps through the environment to be carried out at the start of
training to fill the buffer with a starting point
avg_reward_len: how many episodes to take into account when calculating the avg reward
min_episode_reward: the minimum score that can be achieved in an episode. Used for filling the avg buffer
before training begins
seed: seed value for all RNG used
batches_per_epoch: number of batches per epoch
n_steps: size of n step look ahead
"""
super().__init__()
# Environment
self.exp = None
self.env = self.make_environment(env, seed)
self.test_env = self.make_environment(env)
self.obs_shape = self.env.observation_space.shape
self.n_actions = self.env.action_space.n
# Model Attributes
self.buffer = None
self.dataset = None
self.net = None
self.target_net = None
self.build_networks()
self.agent = ValueAgent(
self.net,
self.n_actions,
eps_start=eps_start,
eps_end=eps_end,
eps_frames=eps_last_frame,
)
# Hyperparameters
self.sync_rate = sync_rate
self.gamma = gamma
self.lr = learning_rate
self.batch_size = batch_size
self.replay_size = replay_size
self.warm_start_size = warm_start_size
self.batches_per_epoch = batches_per_epoch
self.n_steps = n_steps
self.save_hyperparameters()
# Metrics
self.total_episode_steps = [0]
self.total_rewards = [0]
self.done_episodes = 0
self.total_steps = 0
# Average Rewards
self.avg_reward_len = avg_reward_len
for _ in range(avg_reward_len):
self.total_rewards.append(torch.tensor(min_episode_reward, device=self.device))
self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len:]))
self.state = self.env.reset()
def run_n_episodes(self, env, n_episodes: int = 1, epsilon: float = 1.0) -> List[int]:
"""
Carries out N episodes of the environment with the current agent
Args:
env: environment to use, either train environment or test environment
n_episodes: number of episodes to run
epsilon: epsilon value for DQN agent
"""
total_rewards = []
self.im_arr=[]
self.actions_record=[]
for _ in range(n_episodes):
episode_state = env.reset()
done = False
episode_reward = 0
while not done:
self.agent.epsilon = epsilon
action = self.agent(episode_state, self.device)
#print(action)
import matplotlib.pyplot as plt
#plt.imshow(episode_state[0,:,:])
#plt.show()
self.im_arr.append(np.mean(episode_state,axis=0).flatten())
self.actions_record.append(action[0])
next_state, reward, done, _ = env.step(action[0])
episode_state = next_state
episode_reward += reward
total_rewards.append(episode_reward)
self.activations=self.agent.recorder.data
self.im_arr=np.array(self.im_arr)
return total_rewards
def populate(self, warm_start: int) -> None:
"""Populates the buffer with initial experience"""
if warm_start > 0:
self.state = self.env.reset()
for _ in range(warm_start):
self.agent.epsilon = 1.0
action = self.agent(self.state, self.device)
#print(action)
next_state, reward, done, _ = self.env.step(action[0])
exp = Experience(state=self.state, action=action[0], reward=reward, done=done, new_state=next_state)
self.buffer.append(exp)
self.state = next_state
if done:
self.state = self.env.reset()
def build_networks(self) -> None:
"""Initializes the DQN train and target networks"""
self.net = CNN(self.obs_shape, self.n_actions)
self.target_net = CNN(self.obs_shape, self.n_actions)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Passes in a state x through the network and gets the q_values of each action as an output
Args:
x: environment state
Returns:
q values
"""
output = self.net(x)
return output
def train_batch(self, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Contains the logic for generating a new batch of data to be passed to the DataLoader
"""
# src/pdf_summary.py
#!/usr/bin/env python
from astropy.io import fits
import ConfigParser
import datetime
import log
import matplotlib
# matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.pyplot import table
from pyraf import iraf
import numpy
import os
import re
import utils
# ----------------------------------------------------------------------------------------------------------------------
def start(configfile):
logger = log.getLogger('write')
config = ConfigParser.RawConfigParser()
config.optionxform = str # make options case-sensitive
config.read(configfile)
iraf.onedspec()
for path, process in config.items("ScienceDirectories"): # Returns list of (variable, value) pairs
logger.debug('%s = %s', path, process)
if not process:
logger.debug('Skipping %s', path)
continue
pdf = PdfPages(path + '/Final/summary.pdf')
sci = imexam(path)
tel = imexam(path + '/Telluric')
sci['SNR'] = estimate_snr(path + '/Intermediate/zbduvsrc_comb_order3_MEF.fits[1]') # flam1.fits
tel['SNR'] = estimate_snr(path + '/Telluric/Intermediate/hvsrc_comb_order3_SEF.fits') # ftell_nolines1
sci['PARANGLE'] = parallactic(dec=float(sci['DEC']),
ha=hms2deg(sci['HA']),
lat=location(sci['OBSERVAT'])['latitude'],
az=float(sci['AZIMUTH']), units='degrees')
tel['PARANGLE'] = parallactic(dec=float(tel['DEC']),
ha=hms2deg(tel['HA']),
lat=location(tel['OBSERVAT'])['latitude'],
az=float(tel['AZIMUTH']), units='degrees')
logger.debug('SCI: %s', sci)
logger.debug('TEL: %s', tel)
fig = pyplot.figure()
ax = fig.add_subplot(211, frame_on=False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# TOP TABLE:
labels = [sci['GEMPRGID'] + '\n' + sci['DATE-OBS'], 'Total counts, K', 'FWHM ("), K', "S/N, 2.1-2.2 um", 'Airmass', 'HA']
text = [[sci['OBJECT'], sci['PEAK'], sci['FWHM'], sci['SNR'], sci['AIRMASS'], sci['HA']],
[tel['OBJECT'], tel['PEAK'], tel['FWHM'], tel['SNR'], tel['AIRMASS'], tel['HA']]]
table(cellText=text, colLabels=labels, loc='upper center')
# BOTTOM TABLE:
labels = [sci['GEMPRGID'] + '\n' + sci['DATE-OBS'], 'Slit Angle', 'Par. Angle', 'Diff', 'IQ', 'CC', 'WV', 'SB']
text = [[sci['OBJECT'], sci['PA'], sci['PARANGLE'], abs(sci['PA'] - sci['PARANGLE']), sci['RAWIQ'], sci['RAWCC'], sci['RAWWV'], sci['RAWBG']],
[tel['OBJECT'], tel['PA'], tel['PARANGLE'], abs(tel['PA'] - tel['PARANGLE']), tel['RAWIQ'], tel['RAWCC'], tel['RAWWV'], tel['RAWBG']]]
table(cellText=text, colLabels=labels, loc='center')
version = '0.9' # TODO: Fix the version
date = datetime.datetime.now()
ax.text(0.0, 0.28, 'GNIRS-Pype version ' + version + ", " + str(date), size=5)
# TODO: Query Simbad for the target redshift and add to config file.
# Possibly do it at the same time as standard star lookup?
ax = fig.add_subplot(212)
sci_wave, sci_flux = numpy.loadtxt(path + '/Final/' + sci['OBJECT'] + '_src.txt', unpack=True)
pyplot.plot(sci_wave, sci_flux, color='black', marker='', linestyle='-', linewidth=0.5, label=sci['OBJECT'])
pyplot.ylim(0.0, 1.1 * numpy.amax(sci_flux)) # force a lower limit of zero
vega_wave, vega_flux = numpy.loadtxt(config.get('defaults', 'runtimeData') + 'vega.txt', unpack=True)
# TODO: if redshift: vega_wav = vega_wav / (1 + redshift)
vega_flux *= 1.05 * numpy.amax(sci_flux) / numpy.amax(vega_flux)
pyplot.plot(vega_wave, vega_flux, color='blue', marker='', linestyle='--', linewidth=0.5, label='Vega')
# TODO: if flux calibrated set the proper axes labels:
ylabel = r'erg cm$^{-2}$ s$^{-1}\ \AA^{-1}$'
# ylabel = r'F$_{\lambda}$, arbitrary units'
pyplot.ylabel(ylabel, size=8)
# xlabel should be either "Rest" or "Observed" depending on whether a redshift was found and corrected for:
xlabel = r'Observed wavelength, $\mu$m'
pyplot.xlabel(xlabel, size=8)
fig.tight_layout()
pyplot.legend(loc='best', fancybox=True, numpoints=1, prop={'size': 6})
pyplot.grid(linewidth=0.25)
pdf.savefig(fig)
pyplot.close(fig)
# --------------------------------------------------------------------------------------------------------------
# Plot the separate orders so the user can judge if there are any unacceptable offsets
# and edit the regions used for combining if they like:
regions = {}
for order, r in config.items('orderScalingRegions'):
regions[int(order)] = r
logger.debug('orderScalingRegions: %s', regions)
prefix = \
config.get('runtimeFilenames', 'finalPrefix') + \
config.get('runtimeFilenames', 'fluxCalibPrefix') + \
config.get('runtimeFilenames', 'dividedTelContinuumPrefix') + \
config.get('runtimeFilenames', 'telluricPrefix') + \
config.get('runtimeFilenames', 'extractRegularPrefix')
combinedsrc = config.get('runtimeFilenames', 'combinedsrc')
plot_orders(
filelist=utils.make_list(prefix + utils.nofits(combinedsrc), regions=regions, suffix='.txt'),
path=path + '/Intermediate/',
output=pdf)
if config.getboolean('extractSpectra1D', 'extractFullSlit'):
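        # TODO: these calls predate the current plot_orders(filelist, path,
        # output) signature, and 'steps' in the stepwise branch is undefined here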
plot_orders("flamfull", "../PRODUCTS/orders_fullslit.pdf")
if config.getboolean('extractSpectra1D', 'extractStepwise'):
for k in range(1, steps):
plot_orders("flamstep"+str(k)+"_", "../PRODUCTS/orders_step"+str(k)+".pdf")
pdf.close()
return
# ----------------------------------------------------------------------------------------------------------------------
def imexam(path, ypos=340):
"""
Measure the spectrum peak and FWHM
:param path: target path of image to measure
:param ypos: Y-position to perform measurements [340 pix]
:return: dictionary of measurements {? overkill for only 2 values ?}
"""
logger = log.getLogger('datasheet.imexam')
fits_file = 'Intermediate/src_comb.fits'
ap_file = 'Intermediate/database/apsrc_comb_SCI_1_'
original_path = os.getcwd()
iraf.chdir(path) # This shouldn't be necessary, but IRAF has path length limits
with open(ap_file, 'r') as f:
for line in f.readlines():
if 'center' in line:
xpos = float(line.split()[1])
break
logger.debug('Spectrum X-position: %.2f pix', xpos)
cursor = 'tmp.cur' # Write a cursor file for imexam
with open(cursor, 'w') as f:
f.write('%.3f %.3f\n' % (xpos, ypos))
logger.info('Running IRAF imexam to measure the spectrum peak and FHWM...')
iraf.unlearn(iraf.imexam)
iraf.unlearn(iraf.jimexam) # jimexam = 1-dimensional gaussian line fit
# iraf.jimexam.naverage = 50 # Number of lines, columns, or width perpendicular to a vector to average
# iraf.jimexam.width = 100 # Width of background region for background subtraction (pix)
# iraf.jimexam.rplot = 100 # Radius to which the radial profile or 1D profile fits are plotted (pix)
# iraf.jimexam.sigma = # Initial sigma (pix)
# iraf.imexam.graphics = 'stgkern' # Force the use of the standard IRAF graphics kernel
logger.debug('iraf.jimexam.sigma = %0.3f', iraf.jimexam.sigma)
logger.debug('iraf.jimexam.naverage = %0.3f', iraf.jimexam.naverage)
logfile = 'tmp.log'
iraf.imexam(
input=fits_file + '[SCI,1]', frame=1, output='', logfile=logfile, keeplog='yes', defkey='j',
ncstat=5, nlstat=5, imagecur=cursor, use_display='no', Stdout=1)
logger.debug('Parsing imexam results from the log file...')
peak = None
fwhm = None
with open(logfile) as f:
for line in f:
if '#' in line:
continue
logger.debug('%s', line.strip())
vals = line.replace('=', ' ').split()
if vals[0] == 'Lines': # record measure of x center
center = float(vals[3])
peak = float(vals[5])
fwhm = float(vals[9])
break
logger.debug('center = %s peak = %s fwhm = %s', center, peak, fwhm)
data = {'PEAK': peak, 'FWHM': fwhm}
logger.debug('Cleaning up...')
for f in [cursor, logfile]:
os.remove(f)
logger.debug('Reading some FITS header keywords...')
header = fits.open(fits_file)[0].header
for key in ['GEMPRGID', 'AIRMASS', 'RA', 'DEC', 'HA', 'AZIMUTH', 'PA',
'OBSERVAT', 'RAWIQ', 'RAWCC', 'RAWWV', 'RAWBG', 'DATE-OBS']:
try:
data[key] = header[key].strip() if isinstance(header[key], str) else header[key]
except:
logger.warning('%s[%s] is undefined', fits_file, key)
data[key] = None
data['OBJECT'] = re.sub('[^a-zA-Z0-9]', '', header['OBJECT']) # replace non-alphanumeric characters
iraf.chdir(original_path)
logger.debug('data: %s', data)
return data
# ----------------------------------------------------------------------------------------------------------------------
def estimate_snr(onedspectrum, wav1=21000, wav2=22000, interactive=False):
"""
Estimate Signal-to-Noise ratio
:param onedspectrum: input one-dimensional (extracted) spectrum
:param wav1: starting wavelength of the range to fit and measure
:param wav2: ending wavelength of the range to fit and measure
:param interactive: run the IRAF continuum fit interactively [False]
:return: signal-to-noise ratio (float)
"""
logger = log.getLogger('datasheet.snr')
logger.info('Estimating S/N...')
output = 'tmp.fits'
stdout = 'tmp.out'
cursor = 'tmp.cur'
logfile = 'tmp.log'
with open(cursor, 'w') as f: # Generate a cursor file for bplot
f.write('%d 0 1 m\n' % wav1)
f.write('%d 0 1 m\n' % wav2)
f.write('q')
logger.debug('continuum input: %s', onedspectrum)
logger.debug('sample: %d:%d', wav1, wav2)
iraf.sfit.logfile = logfile
iraf.continuum(
input=onedspectrum, output=output, lines='*', bands='1', type='ratio', replace=False, wavescale=True,
logscale=False, override=False, logfile=logfile, interactive=interactive, sample='%d:%d' % (wav1, wav2),
naverage=1, function='spline3', order=3, low_rej=2, high_rej=3, niterate=5, grow=1)
iraf.splot.save_file = logfile
iraf.bplot(
images=output, apertures="", band=1, cursor=cursor, next_image="",
new_image="", overwrite="no", spec2="", constant=0.0, wavelength=0.0, linelist="",
wstart=0.0, wend=0.0, dw=0.0, boxsize=2, Stdout=stdout) # graphics="stgkern", StdoutG="dev$null")
logger.debug('Parsing output...')
snr = None
with open(stdout, 'r') as f:
for line in f.readlines():
if 'snr' in line:
snr = float(line.split()[-1])
logger.debug('SNR: %s', snr)
for f in [cursor, logfile, output, stdout]:
os.remove(f)
return snr
# ----------------------------------------------------------------------------------------------------------------------
def parallactic(dec, ha, lat, az, units='degrees'):
"""
Compute the parallactic angle
:param dec: target declination
:param ha: hour angle
:param lat: observatory latitude
:param az: target azimuth
:param units: degrees or radians for input and ouput quantities
:return: parallactic angle (float)
"""
logger = log.getLogger('parallactic')
if units == 'degrees':
dec *= numpy.pi / 180.
ha *= numpy.pi / 180.
lat *= numpy.pi / 180.
az *= numpy.pi / 180.
if numpy.cos(dec) != 0.0:
sinp = -1.0*numpy.sin(az)*numpy.cos(lat)/numpy.cos(dec)
cosp = -1.0*numpy.cos(az)*numpy.cos(ha)-numpy.sin(az)*numpy.sin(ha)*numpy.sin(lat)
pa = numpy.arctan2(sinp, cosp)
else:
if lat > 0.0:
pa = numpy.pi
else:
pa = 0.0
if units == 'degrees':
pa *= 180. / numpy.pi
logger.debug('Parallactic Angle: %.3f %s', pa, units)
return pa
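# Worked example (added): a target observed due south at transit gives
# parallactic(dec=0., ha=0., lat=19.8, az=180.) ~= 0.0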
# ----------------------------------------------------------------------------------------------------------------------
def hms2deg(angle):
"""Convert sexagesimal HH:MM:SS.sss to decimal degrees"""
h, m, s = angle.split(':')
hours = float(h) + float(m)/60. + float(s)/3600.
return hours / 24. * 360.
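# Worked example (added): hms2deg('19:49:25.7016') ~= 297.357
# (19h49m25.7s expressed in degrees, i.e. hours * 15)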
# ----------------------------------------------------------------------------------------------------------------------
def location(observatory):
"""Return the observatory location as a dictionary"""
if observatory == 'Gemini-North':
latitude = 19.82381 # 19:49:25.7016 in decimal degrees
longitude = -155.46906 # -155:28:08.616
elevation = 4213 # meters
elif observatory == 'Gemini-South':
latitude = -30.24075 # -30:14:26.700 in decimal degrees
longitude = -70.7366933333 # -70:44:12.096
elevation = 2722 # meters
else:
raise SystemExit('Unknown observatory')
return {'latitude': latitude, 'longitude': longitude, 'elevation': elevation}
# ----------------------------------------------------------------------------------------------------------------------
def plot_orders(filelist, path, output):
logger = log.getLogger('plot_orders')
logger.debug('filelist: %s', filelist)
logger.debug('path: %s', path)
logger.debug('output: %s', output)
fig = pyplot.figure()
goodbits = []
for f in filelist:
filename, start, end, junk = re.split(r'[\[:\]]', f)
start = int(start)
end = int(end)
logger.debug('filename: %s', filename)
logger.debug('start: %s, end: %s', start, end)
wave, flux = numpy.loadtxt(path + filename, unpack=True)
pyplot.plot(wave, flux, color='red', marker='', linestyle='-', linewidth=0.5) # label the orders?
pyplot.plot(wave[start:end], flux[start:end], color='green', marker='', linestyle='-', linewidth=0.5)
goodbits.extend(flux[start:end])
pyplot.ylim(numpy.amin(goodbits), 1.05 * numpy.amax(goodbits))
pyplot.xlabel(r"$\mu$m, observed")
pyplot.ylabel(r"F$_{\lambda}$")
output.savefig(fig)
pyplot.close(fig)
return
# | |
#!/usr/bin/python
"""
SPDX-License-Identifier: Apache-2.0
Copyright (c) 2019 STMicroelectronics.
This script define Stm32SerieUpdate class
to be used by update_stm32_package.py
"""
import os
import shutil
import subprocess
import re
from pathlib import Path
import logging
from jinja2 import Environment, FileSystemLoader
import ble_library
from common_utils import common_utils
STM32_CUBE_REPO_BASE = "https://github.com/STMicroelectronics/STM32Cube"
"""GitHub URL to get STM32Cube"""
SCRIPT_DIR = Path(__file__).absolute().parent
"""Script directory."""
REPO_ROOT = SCRIPT_DIR / ".."
"""Repository root (used for input/output default folders)."""
# list of created files. It is necessary to remove all of them
# as they are fully created when applying zephyr patch
zephyr_file_created = [
"CMakeLists.txt",
"README",
"drivers/include/stm32_assert.h",
]
def version_tuple(version):
"""Remove 'v' in front of version and convert it to tuple,
so that versions can be compared
"""
v = re.sub("^v", "", version)
return tuple(map(int, (v.split("."))))
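# Worked example (added): version_tuple("v1.10.0") == (1, 10, 0), which
# compares correctly against (1, 9, 0) where a string comparison would not.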
class Stm32SerieUpdate:
"""class Stm32SerieUpdate"""
def __init__(
self,
stm32_serie,
stm32cube_repo_path,
noclean,
version_update,
debug,
):
"""Class Stm32SerieUpdate constructor
Args:
stm32_serie: stm32 serie ex:stm32f3xx
stm32cube_repo_path: directory path where to fetch github repo
noclean: boolean to clean or not github repo after update done
version_update: string to force a specified version to be updated
debug: boolean to set log debug level
Raises:
ValueError: If stm32 serie is not recognised.
FileNotFoundError: If the Zephyr STM32 cube path is not found.
"""
if not stm32_serie.startswith("stm32"):
raise ValueError(
f"Error: Unknown stm32 serie: {stm32_serie}. Must start with 'stm32'"
)
# Set serie variables
self.stm32_serie = stm32_serie
self.stm32_seriexx = stm32_serie + "xx" # ex:stm32f3xx
self.stm32_serie_upper = stm32_serie.upper() # ex:STM32F3
self.stm32_seriexx_upper = self.stm32_serie_upper + "xx" # ex:STM32F3xx
self.serie = self.stm32_serie_upper[5:]
self.noclean = noclean
self.version_update = version_update
self.debug = debug
self.module_patch = f"module_{self.stm32_serie}.patch"
# ##### 3 root directories to work with ########
# 1: STM32Cube repo Default $HOME/STM32Cube_repo
# 2 : zephyr stm32 path : ex: .../zephyr_project/module/hal/stm32
# 3: Temporary directory to construct the update
# (within STM32Cube repo dir)
self.stm32cube_repo_path = stm32cube_repo_path
if not self.stm32cube_repo_path.exists():
self.stm32cube_repo_path.mkdir()
self.zephyr_hal_stm32_path = REPO_ROOT
if not self.zephyr_hal_stm32_path.exists():
raise FileNotFoundError("Error: cannot find zephyr project")
self.stm32cube_temp = self.stm32cube_repo_path / "temp_stm32xx_update"
if self.stm32cube_temp.exists():
shutil.rmtree(
str(self.stm32cube_temp), onerror=common_utils.remove_readonly
)
self.stm32cube_temp.mkdir()
# subdir specific to a stm32 serie
self.stm32cube_serie_path = self.stm32cube_repo_path / Path(
"STM32Cube" + self.serie
)
self.zephyr_module_serie_path = (
self.zephyr_hal_stm32_path / "stm32cube" / self.stm32_seriexx
)
self.stm32cube_temp_serie = (
self.stm32cube_temp / "stm32cube" / self.stm32_seriexx
)
shutil.rmtree(str(self.stm32cube_temp), onerror=common_utils.remove_readonly)
self.stm32cube_temp_serie.mkdir(parents=True)
self.readme_file_path = self.zephyr_module_serie_path / "README"
self.version_tag = []
self.current_version = ""
self.update_commit = ""
if self.debug:
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.DEBUG)
self.std_dest = None
else:
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
self.std_dest = subprocess.DEVNULL
def os_cmd(self, cmd, cwd=None, shell=False):
"""Execute a command with subprocess.check_call()
Args:
cmd: string command to execute.
cwd: directory where to run command
shell: boolean to enable command interpretation by the shell
Returns:
return the returncode of the command after execution.
"""
logging.debug("%s", f"{str(cmd)} cwd:{str(cwd)}")
return subprocess.check_call(
cmd,
shell=shell,
stdout=self.std_dest,
stderr=self.std_dest,
cwd=cwd,
)
def rename_conf_template(self, path):
"""renames hal_conf_template.h to hal_conf.h ...
Args:
path: path where to apply the files processing
"""
# except for _hal_conf_template.h which is renamed
hal_conf_template_fullpath = Path(
path / (self.stm32_seriexx + "_hal_conf_template.h")
)
if hal_conf_template_fullpath.is_file():
hal_conf_fullpath = Path(
re.sub("_template", r"", str(hal_conf_template_fullpath))
)
if hal_conf_fullpath.exists():
hal_conf_fullpath.unlink()
hal_conf_template_fullpath.rename(hal_conf_fullpath)
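        # Worked example (added): stm32f3xx_hal_conf_template.h is renamed to
        # stm32f3xx_hal_conf.h, removing any stale stm32f3xx_hal_conf.h first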
def major_branch(self):
# check whether master branch exist, otherwise use main branch
master_branch_exist = subprocess.check_output(
("git", "ls-remote", "--heads", "origin", "master"),
cwd=self.stm32cube_serie_path,
).decode("utf-8")
if master_branch_exist:
return "master"
else:
return "main"
def clone_cube_repo(self):
"""Clone or fetch a stm32 serie repo"""
repo_name = STM32_CUBE_REPO_BASE + self.serie + ".git"
logging.info(
"%s",
"Cloning/fetching repo "
+ repo_name
+ " in "
+ str(self.stm32cube_serie_path),
)
if self.stm32cube_serie_path.exists():
# if already exists, then just clean and fetch
self.os_cmd(("git", "clean", "-fdx"), cwd=self.stm32cube_serie_path)
self.os_cmd(("git", "fetch"), cwd=self.stm32cube_serie_path)
branch = self.major_branch()
self.os_cmd(
("git", "reset", "--hard", branch),
cwd=self.stm32cube_serie_path,
)
else:
self.os_cmd(
("git", "clone", repo_name),
cwd=self.stm32cube_repo_path,
)
branch = self.major_branch()
logging.info("%s", f"Branch used: {branch}")
# get the latest version of cube,
# with the most recent one created being the last entry.
self.os_cmd(("git", "checkout", branch), cwd=self.stm32cube_serie_path)
self.version_tag = subprocess.check_output(
("git", "tag", "-l"), cwd=self.stm32cube_serie_path
).splitlines()
self.version_tag = [x.decode("utf-8") for x in self.version_tag]
# Search latest version
if self.version_update == "":
self.version_update = self.version_tag[0]
for tag in self.version_tag:
if version_tuple(tag) > version_tuple(self.version_update):
self.version_update = tag
def get_zephyr_current_version(self):
"""Look for current zephyr hal version
Returns:
return previous zephyr cube version.
Raises:
ValueError: If version is not found.
"""
with open(str(self.readme_file_path), "r") as f:
for line in f:
# pattern : "version " follow by optional "v",
# followed by x.y or x.y.z x,y,z may represent several digits
# ex: 'version v1.8.9', 'version 10.20.25'
pattern = r".*version v?(\d+\.\d+\.?\d*).*$"
if re.match(pattern, line):
previous_version = re.sub(pattern, r"\1", line).rstrip("\n")
break
# Match previous version and list of existing tags
# which could be vx.y or x.y
pos_version = [
i for i, a in enumerate(self.version_tag) if previous_version in a
]
if pos_version:
# return previous zephyr version
return self.version_tag[pos_version[0]]
else:
self.clean_files()
raise ValueError(
f"Error: cannot find version {previous_version} in STM32Cube_repo"
)
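# Quick illustration of the version-extraction pattern used above:
#   pattern = r".*version v?(\d+\.\d+\.?\d*).*$"
#   re.sub(pattern, r"\1", "Status: version v1.8.9")  # -> '1.8.9'
#   re.sub(pattern, r"\1", "version 10.20.25")        # -> '10.20.25'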
def extract_source(self):
"""Extract sources and includes files from STM32Cube repo
and copy them in temporary directory
"""
# for CMSIS files
temp_cmsis_soc_path = self.stm32cube_temp_serie / "soc"
Path.mkdir(temp_cmsis_soc_path, parents=True)
stm32cube_cmsis_include_path = (
self.stm32cube_serie_path
/ "Drivers"
/ "CMSIS"
/ "Device"
/ "ST"
/ self.stm32_seriexx_upper
/ "Include"
)
shutil.rmtree(temp_cmsis_soc_path, onerror=common_utils.remove_readonly)
shutil.copytree(stm32cube_cmsis_include_path, temp_cmsis_soc_path)
stm32cube_cmsis_templates_path = (
self.stm32cube_serie_path
/ "Drivers"
/ "CMSIS"
/ "Device"
/ "ST"
/ self.stm32_seriexx_upper
/ "Source"
/ "Templates"
)
# iterdir() already yields full paths, so no extra join is needed
for repo_src in stm32cube_cmsis_templates_path.iterdir():
if repo_src.is_file():
shutil.copy(str(repo_src), str(temp_cmsis_soc_path))
# for hal and ll drivers
temp_drivers_include_path = self.stm32cube_temp_serie / "drivers" / "include"
temp_drivers_include_path.parent.mkdir(parents=True)
stm32cube_driver_inc = (
self.stm32cube_serie_path
/ "Drivers"
/ Path(self.stm32_seriexx_upper + "_HAL_Driver")
/ "Inc"
)
if temp_drivers_include_path.exists():
shutil.rmtree(
temp_drivers_include_path, onerror=common_utils.remove_readonly
)
shutil.copytree(stm32cube_driver_inc, temp_drivers_include_path)
# except for _hal_conf_template.h which is renamed
self.rename_conf_template(temp_drivers_include_path)
temp_drivers_src_path = self.stm32cube_temp_serie / "drivers" / "src"
temp_drivers_src_path.mkdir()
stm32cube_drivers_src_path = (
self.stm32cube_serie_path
/ "Drivers"
/ Path(self.stm32_seriexx_upper + "_HAL_Driver")
/ "Src"
)
shutil.rmtree(temp_drivers_src_path, onerror=common_utils.remove_readonly)
shutil.copytree(stm32cube_drivers_src_path, temp_drivers_src_path)
def build_from_current_cube_version(self):
"""Build a commit in temporary dir with STM32Cube version
corresponding to zephyr current hal version
"""
# reset the STM32Cube repo to this current version
self.os_cmd(
("git", "reset", "--hard", self.current_version),
cwd=self.stm32cube_serie_path,
)
# build the zephyr module from the stm32cube
self.extract_source()
logging.info(
"%s", "Building module from STM32Cube_repo " + self.current_version
)
if not self.stm32cube_temp_serie.parent.exists():
self.stm32cube_temp_serie.parent.mkdir(parents=True)
self.os_cmd(
("git", "add", "-A", "stm32cube/" + self.stm32_seriexx + "/*"),
cwd=self.stm32cube_temp,
)
self.os_cmd(
("git", "commit", "-am", '"module' + self.current_version + '"'),
cwd=self.stm32cube_temp,
)
# Remove trailing whitespace
self.os_cmd(
("git", "rebase", "--whitespace=fix", "HEAD~1"),
cwd=self.stm32cube_temp,
)
def build_patch_from_current_zephyr_version(self):
"""Build patch between zephyr current hal version and
corresponding official STM32Cube version
"""
# clean-up the module
shutil.rmtree(
str(self.stm32cube_temp_serie), onerror=common_utils.remove_readonly
)
# populate the new repo with this current zephyr module
shutil.copytree(self.zephyr_module_serie_path, self.stm32cube_temp_serie)
# commit this current version module
self.os_cmd(("git", "add", "*"), cwd=self.stm32cube_temp)
self.os_cmd(("git", "commit", "-am", '"module"'), cwd=self.stm32cube_temp)
# Remove trailing space
self.os_cmd(
("git", "rebase", "--whitespace=fix", "HEAD~1"),
cwd=self.stm32cube_temp,
)
# generate a patch for files and _hal.conf.h file in the module
logging.info(
"%s",
"Building patch from official "
+ self.current_version
+ " to current zephyr module",
)
# For an unclear reason, passing a tuple ("git", "diff", ...) fails on Linux,
# especially for this command. Keep it as a single string.
self.os_cmd(
("git diff --ignore-space-at-eol HEAD~1 --output=" + self.module_patch),
shell=True,
cwd=self.stm32cube_temp,
)
self.os_cmd(("dos2unix", self.module_patch), cwd=self.stm32cube_temp)
def update_readme(self, make_version, make_commit):
"""Update README file
Args:
make_version: latest STM32Cube version.
make_commit: Commit corresponding to latest STM32Cube version.
"""
see_release_note = True
readme_path = self.zephyr_module_serie_path / "README"
with readme_path.open(mode="r") as readme_prev:
lines = (x for x in readme_prev.read().splitlines())
readme_path.unlink()
# Write README from previous one if exists
with open(str(readme_path), "w") as readme_file:
for LineItem in lines:
# change version nb
if "status" in LineItem.lower():
readme_file.write("Status:\n")
readme_file.write(f" version {make_version}\n")
next(lines) # skip next line
elif "commit" in LineItem.lower():
readme_file.write("Commit:\n")
readme_file.write(f" {make_commit}")
next(lines) # skip next line
elif "URL" in LineItem.upper():
readme_file.write("URL:\n")
readme_file.write(
" https://github.com/STMicroelectronics/"
+ f"STM32Cube{self.serie}\n"
)
next(lines) # skip next line
# change patch list with a link to the release_note.html
elif "Patch List" in LineItem:
readme_file.write("Patch List:\n")
readme_file.write(
"--> please check that the following list "
+ "is still valid:\n"
)
else:
if "See release_note.html from STM32Cube" in LineItem:
see_release_note = False
readme_file.write(f"{LineItem}\n")
# at the very end of the file :
if see_release_note:
readme_file.write("\n See release_note.html from STM32Cube\n")
readme_file.flush()
self.os_cmd(("dos2unix", str(readme_path)))
def copy_release_note(self):
"""Copy | |
import os
import pytest
import torch
from sklearn.metrics import accuracy_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import Accuracy
torch.manual_seed(12)
def test_no_update():
acc = Accuracy()
with pytest.raises(NotComputableError, match=r"Accuracy must have at least one example before it can be computed"):
acc.compute()
def test__check_shape():
acc = Accuracy()
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1, 5, 12)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1, 6)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_binary_wrong_inputs():
acc = Accuracy()
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y has not only 0 or 1 values
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long()))
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
acc.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long(),))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
def test_binary_input_N():
# Binary accuracy on input of shape (N, 1) or (N, )
def _test():
acc = Accuracy()
y_pred = torch.randint(0, 2, size=(10,)).long()
y = torch.randint(0, 2, size=(10,)).long()
acc.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert acc._type == "binary"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
# Batched Updates
acc.reset()
y_pred = torch.randint(0, 2, size=(100,)).long()
y = torch.randint(0, 2, size=(100,)).long()
n_iters = 16
batch_size = y.shape[0] // n_iters + 1
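# e.g. with 100 samples and n_iters = 16, batch_size = 100 // 16 + 1 = 7;
# 16 slices of 7 reach index 112 >= 100, so every sample is seen exactly
# once and any slice starting past index 100 is simply empty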
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert acc._type == "binary"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
# check multiple random inputs as exact random occurrences are rare
for _ in range(10):
_test()
def test_binary_input():
acc = Accuracy()
def _test(y_pred, y, n_iters):
acc.reset()
acc.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
if n_iters > 1:
# Batched Updates
batch_size = y.shape[0] // n_iters + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert acc._type == "binary"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def get_test_cases():
test_cases = [
# Binary accuracy on input of shape (N, L)
(torch.randint(0, 2, size=(10, 5)).long(), torch.randint(0, 2, size=(10, 5)).long(), 1),
(torch.randint(0, 2, size=(10, 1, 5)).long(), torch.randint(0, 2, size=(10, 1, 5)).long(), 1),
(torch.randint(0, 2, size=(100, 8)).long(), torch.randint(0, 2, size=(100, 8)).long(), 16),
# Binary accuracy on input of shape (N, H, W, ...)
(torch.randint(0, 2, size=(4, 1, 12, 10)).long(), torch.randint(0, 2, size=(4, 1, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(4, 1, 12, 10)).long(), torch.randint(0, 2, size=(4, 1, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(100, 8, 8)).long(), torch.randint(0, 2, size=(100, 8, 8)).long(), 16),
# Binary accuracy on input of shape (N, 1, ...) - Multiclass input
(torch.randint(0, 2, size=(4, 1)).long(), torch.randint(0, 2, size=(4,)).long(), 1),
(torch.randint(0, 2, size=(4, 1, 12)).long(), torch.randint(0, 2, size=(4, 12)).long(), 1),
(torch.randint(0, 2, size=(100, 1, 8, 8)).long(), torch.randint(0, 2, size=(100, 8, 8)).long(), 16),
# Multiclass input data of shape (N, ) and (N, C)
]
return test_cases
for _ in range(10):
# check multiple random inputs as exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, n_iters in test_cases:
_test(y_pred, y, n_iters)
def test_multiclass_wrong_inputs():
acc = Accuracy()
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
def test_multiclass_input():
acc = Accuracy()
def _test(y_pred, y, batch_size):
acc.reset()
acc.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
if batch_size > 1:
# Batched Updates
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert acc._type == "multiclass"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def get_test_cases():
test_cases = [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)).long(), 1),
(torch.rand(10, 10, 1), torch.randint(0, 18, size=(10, 1)).long(), 1),
(torch.rand(10, 18), torch.randint(0, 18, size=(10,)).long(), 1),
(torch.rand(4, 10), torch.randint(0, 10, size=(4,)).long(), 1),
# 2-classes
(torch.rand(4, 2), torch.randint(0, 2, size=(4,)).long(), 1),
(torch.rand(100, 5), torch.randint(0, 5, size=(100,)).long(), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 4, 5), torch.randint(0, 4, size=(10, 5)).long(), 1),
(torch.rand(4, 10, 5), torch.randint(0, 10, size=(4, 5)).long(), 1),
(torch.rand(100, 9, 7), torch.randint(0, 9, size=(100, 7)).long(), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(4, 5, 12, 10), torch.randint(0, 5, size=(4, 12, 10)).long(), 1),
(torch.rand(100, 3, 8, 8), torch.randint(0, 3, size=(100, 8, 8)).long(), 16),
]
return test_cases
for _ in range(10):
# check multiple random inputs as exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
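# Shape walk-through (illustrative): for y of shape (N, C, H, W) = (4, 5, 12, 10),
# transpose(1, 0) gives (5, 4, 12, 10), the reshape collapses it to (5, 480),
# and the final transpose yields (480, 5): one row per sample position and one
# column per class, the layout sklearn's multilabel accuracy_score expects.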
def test_multilabel_wrong_inputs():
acc = Accuracy(is_multilabel=True)
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError):
# incompatible y_pred
acc.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
with pytest.raises(ValueError):
# incompatible y
acc.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
with pytest.raises(ValueError):
# incompatible binary shapes
acc.update((torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)).long()))
def test_multilabel_input():
acc = Accuracy(is_multilabel=True)
def _test(y_pred, y, batch_size):
acc.reset()
acc.update((y_pred, y))
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
np_y_pred = y_pred.numpy()
np_y = y.numpy()
assert acc._type == "multilabel"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def get_test_cases():
test_cases = [
# Multilabel input data of shape (N, C, ...) and (N, C, ...)
(torch.randint(0, 2, size=(10, 4)), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 1),
(torch.randint(0, 2, size=(100, 4)), torch.randint(0, 2, size=(100, 4)).long(), 16),
]
return test_cases
for _ in range(10):
# check multiple random inputs as exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_input_NHW():
acc = Accuracy(is_multilabel=True)
def _test(y_pred, y, batch_size):
acc.reset()
acc.update((y_pred, y))
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
np_y_pred = to_numpy_multilabel(y_pred) # (N, C, H, W, ...) -> (N * H * W ..., C)
np_y = to_numpy_multilabel(y) # (N, C, H, W, ...) -> (N * H * W ..., C)
assert acc._type == "multilabel"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def get_test_cases():
test_cases = [
# Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...)
(torch.randint(0, 2, size=(4, 5, 12, 10)), torch.randint(0, 2, size=(4, 5, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(4, 10, 12, 8)).long(), torch.randint(0, 2, size=(4, 10, 12, 8)).long(), 1),
(torch.randint(0, 2, size=(100, 5, 12, 10)), torch.randint(0, 2, size=(100, 5, 12, 10)).long(), 16),
]
return test_cases
for _ in range(10):
# check multiple random inputs as exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_incorrect_type():
acc = Accuracy()
# Start as binary data
y_pred = torch.randint(0, 2, size=(4,))
y = torch.ones(4).long()
acc.update((y_pred, y))
# And add a multiclass data
y_pred = torch.rand(4, 4)
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
acc.update((y_pred, y))
def _test_distrib_multilabel_input_NHW(device):
# Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...)
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
acc =
#!/usr/bin/env python
# X-HALE model for SHARPy.
# Version 1.0
# See the IFASD 2019 paper by <NAME>, <NAME>, <NAME> and <NAME>.
#
# <NAME>
# June 2019
# ============================================================================
import h5py as h5
import numpy as np
import os
import sharpy.utils.algebra as algebra
route = os.path.dirname(os.path.realpath(__file__)) + '/'
cases = [
'0', # 15%, 15 chords, lateral
]
data = dict()
data['0'] = {
'gust_length': 15, # in chords
'gust_intensity': 0.15, # in % of uinf
'gust_shape': 'lateral 1-cos', # '1-cos' for vertical gust
}
horseshoe = 'off'
for case in cases:
vertical_tail = True
if vertical_tail:
case_name = 'xhale_ifasd' + '_' + case
print('Generating xhale with vertical Ctail')
else:
case_name = 'xhale_ifasd' + '_' + case
print('Generating xhale with horizontal Ctail')
flow = [
'BeamLoader',
'AerogridLoader',
# 'StaticTrim', # uncomment for longitudinal static trim
'StaticCoupled', # static coupled solver with GECB and UVLM
'BeamLoads', # beam loads and strains computation for static
'BeamPlot', # beam structure and data output for static
'AerogridPlot', # aero grid output for static
'DynamicCoupled', # dynamic coupled solver. See end of file:
# settings['DynamicCoupled']['postprocessors']
# for the corresponding 'BeamLoads', 'BeamPlot'
# and 'AerogridPlot' settings and calls.
]
u_inf = 14 # free stream vel [SI]
rho = 1.225 # density [SI]
chord = 0.2 # main wing chord length for gust dimensioning.
# the value used for the geometry generation is
# given in the input/*.xlsx files
if vertical_tail:
alpha = 2.4744791522743887*np.pi/180 # angle of attack [rad]
beta = 0.*np.pi/180 # sideslip angle [rad]
cs_deflection = 1.186515244051093*np.pi/180 # elevators deflection [rad]
aileron_deflection = 0*0.039011*np.pi/180 # aileron deflection [rad]
thrustC = 0.21033486522175712 # baseline thrust [N]
differential = 0 # thrust of right side: T_R = thrustC*(1 + differential)
# thrust of left side: T_L = thrustC*(1 - differential)
roll = 0 # initial/static roll angle [rad]
in_structural_twist = 5*np.pi/180
else:
# NOTE: These values are not the correct trim. Run StaticTrim with your
# current discretisation
alpha = 2.4744791522743887*np.pi/180 # angle of attack [rad]
beta = 0.*np.pi/180 # sideslip angle [rad]
cs_deflection = 1.186515244051093*np.pi/180 # elevators deflection [rad]
aileron_deflection = 0*0.039011*np.pi/180 # aileron deflection [rad]
thrustC = 0.21033486522175712 # baseline thrust [N]
differential = 0 # thrust of right side: T_R = thrustC*(1 + differential)
# thrust of left side: T_L = thrustC*(1 - differential)
roll = 0 # initial/static roll angle [rad]
in_structural_twist = 5*np.pi/180
gravity = 'on'
gravity_value = 9.807
# stiffness multiplier
sigma = 1
# shear stiffness multipliers
ga_mult = 0.1
# spatial offset [m] for the gust (if == 1, the gust starts 1 m in front of
# the reference point [0, 0, 0]).
space_offset = 1.
try:
gust_intensity = data[case]['gust_intensity']
gust_length = data[case]['gust_length']*chord
gust_shape = data[case]['gust_shape']
except KeyError:
gust_intensity = 0
gust_length = 0
gust_shape = '1-cos'
# number of load substeps in the static coupled solver.
n_step = 1
# relaxation factor for the static coupled solver
# static relaxation factor \in [0, 1). 0 == no relaxation
static_relaxation_factor = 0.5
# Dynamic relaxation parameters. Relaxation is linearly varied between
# initial and final relaxation factor in relaxation_steps
initial_relaxation_factor = 0.4
final_relaxation_factor = 0.9
relaxation_steps = 15
# nonlinear beam tolerance.
tolerance = 1e-6
# FSI iteration tolerance
fsi_tolerance = 1e-6
# wake length when not running horseshoe
wake_length = 4 # meters
# geometrical data
span_section = 1.0
dihedral_outer = 10*np.pi/180
length_centre_tail = 1.106
length_outer_tail = 0.65
span_tail = 0.24
span_ctail_L = 0.145
span_ctail_R = 0.24
span_fin = 0.184
span_vfin = 0.15
n_sections = 3
# DISCRETISATION
# spatial discretisation
# chordwise discretisation
m = 8 # main wing
m_tail = 3 # tails
m_fin = 4 # fins and pods
# number of structural elements in inner and outer sections.
# note that you will have 2*n_elem spanwise aero panels
n_elem_section = 4
# structural elements in dihedral section
n_elem_section_dihedral = 8
# elements in central tail boom
n_elem_centre_tail = 1
# elements in outer tail booms
n_elem_outer_tail = 1
# elements in tails (per semi span)
n_elem_tail = 1
# elements in fins and pods
n_elem_fin = 1
n_elem_main = int((n_sections-1)*n_elem_section + n_elem_section_dihedral)
# number of aero surfaces. To understand the logic of this, check SHARPy's
# documentation
n_surfaces = 20
# temporal discretisation
# seconds of simulation
physical_time = 10
# factor multiplying the theoretical timestep
# (dt = chord/m/u_inf*tstep_factor)
tstep_factor = 1
dt = chord/m/u_inf*tstep_factor
n_tstep = round(physical_time/dt)
print('n_tstep: ', n_tstep)
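# worked numbers for the settings above: dt = 0.2/8/14 = 1/560 s (~1.79e-3 s),
# so physical_time = 10 s gives n_tstep = round(10*560) = 5600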
# if horseshoe wake (only for static) is 'on'
# we only need one chordwise wake panel
if horseshoe == 'on':
mstar = 1
else:
# else, we put as many as we need to reach wake_length in steady conditions
mstar = int(wake_length/(u_inf*dt))
print('mstar = ', mstar)
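# worked numbers: each step the wake grows by u_inf*dt = 14/560 = 0.025 m,
# so mstar = int(4/0.025) = 160 chordwise wake panels to reach wake_length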
# beam processing
# don't modify this
n_node_elem = 3
span_main = n_sections*span_section
# total elements, nodes... calculation
# total number of elements
n_elem = 0
n_elem += n_elem_main
n_elem += n_elem_main
n_elem += n_elem_centre_tail
n_elem += n_elem_tail
n_elem += n_elem_tail
n_elem += n_elem_fin
n_elem += n_elem_outer_tail
n_elem += n_elem_tail
n_elem += n_elem_tail
n_elem += n_elem_fin
n_elem += n_elem_outer_tail
n_elem += n_elem_tail
n_elem += n_elem_tail
n_elem += n_elem_fin
n_elem += n_elem_outer_tail
n_elem += n_elem_tail
n_elem += n_elem_tail
n_elem += n_elem_outer_tail
n_elem += n_elem_tail
n_elem += n_elem_tail
n_elem += n_elem_fin
n_elem += n_elem_fin
n_elem += n_elem_fin
n_elem += n_elem_fin
n_elem += n_elem_fin
# number of nodes per part
n_node_section = n_elem_section*(n_node_elem - 1) + 1
n_node_section_dihedral = n_elem_section_dihedral*(n_node_elem - 1) + 1
n_node_main = n_elem_main*(n_node_elem - 1) + 1
n_node_centre_tail = n_elem_centre_tail*(n_node_elem - 1) + 1
n_node_tail = n_elem_tail*(n_node_elem - 1) + 1
n_node_outer_tail = n_elem_outer_tail*(n_node_elem - 1) + 1
n_node_fin = n_elem_fin*(n_node_elem - 1) + 1
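# with quadratic beam elements (n_node_elem = 3) each chained element adds
# (n_node_elem - 1) = 2 new nodes on top of the single starting node, hence
# the n_elem*(n_node_elem - 1) + 1 pattern above; e.g. n_elem_section = 4
# gives n_node_section = 4*2 + 1 = 9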
# total number of nodes
n_node = 0
n_node += n_node_main + n_node_main - 1
n_node += n_node_centre_tail - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
n_node += n_node_fin - 1
n_node += n_node_outer_tail - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
n_node += n_node_fin - 1
n_node += n_node_outer_tail - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
n_node += n_node_fin - 1
n_node += n_node_outer_tail - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
n_node += n_node_outer_tail - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
n_node += n_node_fin - 1
n_node += n_node_fin - 1
n_node += n_node_fin - 1
n_node += n_node_fin - 1
n_node += n_node_fin - 1
# stiffness and mass matrices
# if you add custom stiffness/mass matrices, make sure to update this
# number
n_stiffness = 12
n_mass = 17
# PLACEHOLDERS
# beam
x = np.zeros((n_node, ))
y = np.zeros((n_node, ))
z = np.zeros((n_node, ))
stiffness_db = np.zeros((n_stiffness, 6, 6))
mass_db = np.zeros((n_mass, 6, 6))
beam_number = np.zeros((n_elem, ), dtype=int)
num_node_elements = np.zeros((n_elem, ), dtype=int) + 3
frame_of_reference_delta = np.zeros((n_elem, n_node_elem, 3))
structural_twist = np.zeros((n_elem, n_node_elem))
conn = np.zeros((n_elem, n_node_elem), dtype=int)
elem_stiffness = np.zeros((n_elem, ), dtype=int)
elem_mass = np.zeros((n_elem, ), dtype=int)
boundary_conditions = np.zeros((n_node, ), dtype=int)
app_forces = np.zeros((n_node, 6))
n_lumped_mass = 0
lumped_mass_nodes = None
lumped_mass = None
lumped_mass_inertia = None
lumped_mass_position = None
end_nodesL = np.zeros((n_sections,), dtype=int)
end_nodesR = np.zeros((n_sections,), dtype=int)
end_elementsL = np.zeros((n_sections,), dtype=int)
end_elementsR = np.zeros((n_sections,), dtype=int)
end_tails_nodesL = np.zeros((2, ), dtype=int)
end_tails_elementsL = np.zeros((2, ), dtype=int)
end_tails_nodesR = np.zeros((2, ), dtype=int)
end_tails_elementsR = np.zeros((2, ), dtype=int)
end_of_centre_tail_node = 0
end_of_centre_tail_elem = 0
end_tip_tail_nodeC = np.zeros((2, ), dtype=int)
end_tip_tail_elemC = np.zeros((2, ), dtype=int)
tail_beam_numbersR = np.zeros((2, 3)) # 0=centre spar, 1=R tail, 2=L tail
tail_beam_numbersL = np.zeros((2, 3)) # 0=centre spar, 1=R tail, 2=L tail
tail_beam_numbersC = np.zeros((3, ))
fin_beam_numberC = 0
fin_beam_numberL = 0
fin_beam_numberR = 0
vfin_beam_numberC = 0
vfin_beam_numberL = 0
vfin_beam_numberR = 0
fin_beam_numberLL = 0
fin_beam_numberRR = 0
# aero
airfoil_distribution = np.zeros((n_elem, n_node_elem), dtype=int)
surface_distribution = np.zeros((n_elem,), dtype=int) - 1
surface_m = np.zeros((n_surfaces, ), dtype=int)
# chordwise panel distribution. I'd leave it there, but
# encoding: UTF-8
#
# Copyright (c) 2015 Facility for Rare Isotope Beams
#
"""
Request handlers for REST API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import functools
from collections import OrderedDict
from tornado.web import HTTPError
from tornado.web import RequestHandler
from tornado.gen import coroutine
from tornado.gen import maybe_future
from phyutil.phyapp.common.tornado.auth import AuthBasicMixin
from phyutil.phyapp.common.tornado.util import WriteJsonMixin
from . import LatticeSupportMixin
from . import ModelSupportMixin
from . import FileDownloadMixin
LOGGER = logging.getLogger(__name__)
def authorized(method):
"""Decorate handler methods with this to require users to be authorized.
Response status code 401 (Unauthorized) is sent for unauthorized users.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
self.send_error(401)
return
return method(self, *args, **kwargs)
return wrapper
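# Typical usage (mirroring LatticeUploadRestHandler below): apply @authorized
# outermost so unauthenticated requests are rejected with a 401 before the
# coroutine body runs. Hypothetical sketch:
#
#   class ExampleRestHandler(BaseRestRequestHandler):
#       @authorized
#       @coroutine
#       def post(self):
#           ...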
class BaseRestRequestHandler(RequestHandler, AuthBasicMixin):
@coroutine
def prepare(self):
yield maybe_future(super(BaseRestRequestHandler, self).prepare())
yield self.prepare_auth_basic_user()
def get_current_user(self):
return self.get_auth_basic_user()
def write_error(self, status_code, **kwargs):
if status_code == 401:
self.set_unauthorized_header(**kwargs)
super(BaseRestRequestHandler, self).write_error(status_code, **kwargs)
def _particle_type_api(self, particle_type):
api = OrderedDict()
api["type"] = particle_type["type"]
api["links"] = {
"self":self.reverse_url("rest_particle_type_by_id", api["type"])
}
api["name"] = particle_type["name"]
api["protons"] = particle_type["protons"]
api["neutrons"] = particle_type["neutrons"]
return api
def _lattice_type_api(self, lattice_type):
api = OrderedDict()
api["type"] = lattice_type["type"]
api["links"] = {
"self":self.reverse_url("rest_lattice_type_by_id", api["type"])
}
api["name"] = lattice_type["name"]
return api
def _model_type_api(self, model_type):
api = OrderedDict()
api["type"] = model_type["type"]
api["links"] = {
"self":self.reverse_url("rest_model_type_by_id", api["type"])
}
api["name"] = model_type["name"]
return api
def _lattice_api(self, lattice):
api = OrderedDict()
api["id"] = str(lattice["_id"]) # ObjectId to String
api["links"] = { "self":self.reverse_url("rest_lattice_by_id", api["id"]) }
api["name"] = lattice["name"]
api["description"] = lattice["description"]
api["status_type"] = lattice["status_type"]
api["lattice_type"] = lattice["lattice_type"]
api["particle_type"] = lattice["particle_type"]
api["created_by"] = lattice["created_by"]
api["created_date"] = lattice["created_date"].isoformat()
api["properties"] = [self._lattice_prop_api(p) for p in lattice["properties"]]
files = []
for idx, lattice_file in enumerate(lattice["files"]):
files.append(self._lattice_file_api(lattice_file, api["id"], idx+1))
api["files"] = files
return api
def _lattice_file_api(self, lattice_file, lattice_id, file_id):
api = OrderedDict()
api["links"] = {
#"self":self.reverse_url("rest_lattice_file_by_id", "", ""),
"enclosure":self.reverse_url("rest_lattice_file_download_by_id", lattice_id, file_id)
}
api["name"] = lattice_file["name"]
api["filename"] = lattice_file["filename"]
return api
def _lattice_prop_api(self, lattice_prop):
api = OrderedDict()
api["name"] = lattice_prop["name"]
api["value"] = lattice_prop["value"]
if "units" in lattice_prop:
api["unit"] = lattice_prop["units"]
return api
def _lattice_elem_api(self, lattice_elem):
api = OrderedDict()
api["id"] = str(lattice_elem["_id"])
api["links"] = {
"self":self.reverse_url("rest_lattice_element_by_id", api["id"])
}
api["type"] = lattice_elem["type"]
api["lattice_id"] = str(lattice_elem["lattice_id"])
api["order"] = lattice_elem["order"]
api["name"] = lattice_elem["name"]
api["length"] = lattice_elem["length"]
api["position"] = lattice_elem["position"]
properties = []
for p in lattice_elem["properties"]:
properties.append(self._lattice_elem_prop_api(p))
api["properties"] = properties
return api
def _lattice_elem_prop_api(self, lattice_elem_prop):
api = OrderedDict()
api["name"] = lattice_elem_prop["name"]
api["value"] = lattice_elem_prop["value"]
if "units" in lattice_elem_prop:
api["unit"] = lattice_elem_prop["units"]
return api
def _model_api(self, model):
api = OrderedDict()
api["id"] = str(model["_id"])
api["links"] = {
"self":self.reverse_url("rest_model_by_id", api["id"])
}
api["lattice_id"] = str(model["lattice_id"])
api["name"] = model["name"]
api["description"] = model["description"]
api["created_by"] = model["created_by"]
api["created_date"] = model["created_date"].isoformat()
properties = []
for p in model["properties"]:
properties.append(self._model_prop_api(p))
api["properties"] = properties
files = []
for idx, model_file in enumerate(model["files"]):
files.append(self._model_file_api(model_file, api["id"], str(idx+1)))
api["files"] = files
return api
def _model_file_api(self, model_file, model_id, file_id):
api = OrderedDict()
api["links"] = {
"enclosure":self.reverse_url("rest_model_file_download_by_id", model_id, file_id)
}
api["name"] = model_file["name"]
api["filename"] = model_file["filename"]
return api
def _model_prop_api(self, model_prop):
api = OrderedDict()
api["name"] = model_prop["name"]
api["value"] = model_prop["value"]
if "units" in model_prop:
api["unit"] = model_prop["units"]
return api
def _model_elem_api(self, model_elem):
api = OrderedDict()
api["id"] = str(model_elem["_id"])
api["links"] = {
"self":self.reverse_url("rest_model_element_by_id", api["id"])
}
api["model_id"] = str(model_elem["model_id"])
api["lattice_element_id"] = str(model_elem["lattice_element_id"])
properties = []
for p in model_elem["properties"]:
properties.append(self._model_elem_prop_api(p))
api["properties"] = properties
return api
def _model_elem_prop_api(self, model_elem_prop):
api = OrderedDict()
api["name"] = model_elem_prop["name"]
api["value"] = model_elem_prop["value"]
if "units" in model_elem_prop:
api["unit"] = model_elem_prop["units"]
return api
class ParticleTypesRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self):
"""Retrieve list of Particle Types.
**Example response**:
.. sourcecode:: json
HTTP/1.1 200 OK
Content-Type: text/json
[
{
"type": "ar36",
"links": {
"self": "/lattice/rest/v1/particle/types/ar36"
},
"name": "Ar-36",
"protons": 18.0,
"neutrons": 18.0
},
...
]
:status 200: Particle Types found
"""
data = self.application.data
particle_types = yield data.find_particle_types()
self.write_json([self._particle_type_api(pt) for pt in particle_types])
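# Illustrative request against this handler (host and port are assumptions;
# the URL prefix follows the links shown in the docstring above):
#   curl http://localhost:8888/lattice/rest/v1/particle/types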
class ParticleTypeRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self, type_id):
"""Retrieve Particle Type by ID
**Example response**:
.. sourcecode:: json
HTTP/1.1 200 OK
Content-Type: text/json
[
{
"type": "ar36",
"links": {
"self": "/lattice/rest/v1/particle/types/ar36"
},
"name": "Ar-36",
"protons": 18.0,
"neutrons": 18.0
},
...
]
:param type_id: Particle Type ID
:status 200: Particle Type found
:status 404: Particle Type not found
"""
data = self.application.data
particle_type = yield data.find_particle_type_by_id(type_id)
if not particle_type:
raise HTTPError(404)
self.write_json(self._particle_type_api(particle_type))
class LatticeTypesRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self):
"""Retrieve list of Lattice Types.
**Example response**:
.. sourcecode:: json
HTTP/1.1 200 OK
Content-Type: text/json
[
{
"type": "impactz",
"links": {
"self": "/lattice/rest/v1/lattices/types/impactz"
},
"name": "IMPACT"
},
...
]
:status 200: Lattice Types found
"""
data = self.application.data
lattice_types = yield data.find_lattice_types()
self.write_json([self._lattice_type_api(lt) for lt in lattice_types])
class LatticeTypeRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self, type_id):
"""Retrieve Lattice Type by ID.
**Example response**:
.. sourcecode:: json
HTTP/1.1 200 OK
Content-Type: text/json
{
"type": "impactz",
"links": {
"self": "/lattice/rest/v1/lattices/types/impactz"
},
"name": "IMPACT"
}
:param type_id: Lattice Type ID
:status 200: Lattice Type found
:status 404: Lattice Type not found
"""
data = self.application.data
lattice_type = yield data.find_lattice_type_by_id(type_id)
self.write_json(self._lattice_type_api(lattice_type))
class LatticesRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self):
"""Retrieve list of Lattice objects.
**Example response**:
.. sourcecode:: json
HTTP/1.1 200 OK
Content-Type: text/json
[
{
"id": "55e7542bfad7b66cf2598b4a",
"links": {
"self": "/lattice/rest/v1/lattices/55e7542bfad7b66cf2598b4a"
},
"name": "Test",
"description": "This is a description",
"status_type": "development",
"lattice_type": "impactz",
"particle_type": "kr86",
"created_by": "physuser",
"created_date": "2015-09-02T15:55:23.852000",
"properties": [
{
"name": "RefParticleMass",
"value": 931494320.0
},
...
]
"files": [
{
"links": {
"enclosure": "/lattice/rest/v1/lattices/55e7542bfad7b66cf2598b4a/files/1/download"
},
"name": "LatticeFile",
"filename": "test.in"
},
...
]
}
...
]
:status 200: Lattices found
"""
data = self.application.data
lattices = yield data.find_lattices()
self.write_json([self._lattice_api(l) for l in lattices])
class LatticeRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self, lattice_id):
"""Retrieve Lattice object by identifier.
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": "55e7542bfad7b66cf2598b4a",
"links": {
"self": "/lattice/rest/v1/lattices/55e7542bfad7b66cf2598b4a"
},
"name": "Test",
"description": "This is a description",
"status_type": "development",
"lattice_type": "impactz",
"particle_type": "kr86",
"created_by": "physuser",
"created_date": "2015-09-02T15:55:23.852000",
"properties": [
{
"name": "RefParticleMass",
"value": 931494320.0
},
...
]
"files": [
{
"links": {
"enclosure": "/lattice/rest/v1/lattices/55e7542bfad7b66cf2598b4a/files/1/download"
},
"name": "LatticeFile",
"filename": "test.in"
},
...
]
}
:param lattice_id: Lattice ID
:status 200: Lattice found
:status 404: Lattice not found
"""
data = self.application.data
lattice = yield data.find_lattice_by_id(lattice_id)
if not lattice:
raise HTTPError(404)
self.write_json(self._lattice_api(lattice))
class LatticeUploadRestHandler(BaseRestRequestHandler, WriteJsonMixin, LatticeSupportMixin):
@authorized
@coroutine
def post(self, type_id):
"""Create a new Lattice by submitting form data.
Content type MUST be 'multipart/form-data'
Content of the form is dictated by the Lattice type being submitted.
For Lattice type 'impactz' the following parameters are supported:
*name*: new lattice name
*branch*: new lattice branch
*version*: new lattice version (ignored if autoversion is specified)
*autoversion*: (optional) automatically select version of new lattice
*particle_type*: particle type associated with new lattice
*description*: new lattice description
*lattice_file*: raw IMPACT lattice file (ie test.in)
*data_file*: data files referenced by the lattice (multiple allowed)
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"links":{
"replies":"/lattice/rest/v1/lattices/5618320efad7b6600d1f2ecc"
}
}
:param type_id: Lattice Type ID
:status 201: Lattice created
:status 401: Unauthorized
:status 404: Lattice Type not supported
"""
lattice_support = self.construct_lattice_support(type_id)
yield lattice_support.rest_form_upload_post()
class LatticeFilesDownloadRestHander(BaseRestRequestHandler, FileDownloadMixin):
@coroutine
def get(self, lattice_id):
"""
Retrieve an archive file containing all files of the specified Lattice.
:param lattice_id: Lattice ID
:status 200: Lattice file found
:status 404: Lattice file not found
"""
yield self.get_lattice_files(lattice_id)
class LatticeFileDownloadRestHander(BaseRestRequestHandler, FileDownloadMixin):
@coroutine
def get(self, lattice_id, file_id):
"""
Retrieve the file content of the specified Lattice File.
:param lattice_id: Lattice ID
:param file_id: Lattice file ID
:status 200: Lattice file found
:status 404: Lattice file not found
"""
yield self.get_lattice_file(lattice_id, file_id)
class LatticeElementsByOrderRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self, lattice_id):
"""Retrieve Lattice Elements by Lattice ID.
**Example response**:
.. sourcecode:: json
HTTP/1.1 200 OK
Content-Type: text/json
[
{
"id": "55e7542bfad7b66cf2598b4e",
"links": {
"self": "/lattice/rest/v1/lattices/elements/55e7542bfad7b66cf2598b4e"
},
"type": "VALVE",
"lattice_id": "55e7542bfad7b66cf2598b4a",
"name": "DRIFT",
"length": 0.072,
"position": 0.07200000000000273,
"properties": []
},
{
"id": "55e7542bfad7b66cf2598b50",
"links": {
"self": "/lattice/rest/v1/lattices/elements/55e7542bfad7b66cf2598b50"
},
"type": "CAV",
"lattice_id": "55e7542bfad7b66cf2598b4a",
"name": "LS1_CA01:CAV1_D1127",
"length": 0.24,
"position": 0.44706350000001294,
"properties": [
{
"name": "AMP",
"value": 0.64
},
{
"name": "PHA",
"value": -6.524
}
]
},
...
]
:param lattice_id: Lattice ID
:status 200: Lattice Elements found
"""
data = self.application.data
elements = yield data.find_lattice_elements_by_lattice_id(lattice_id)
self.write_json([self._lattice_elem_api(e) for e in elements])
class LatticeElementByOrderRestHandler(BaseRestRequestHandler, WriteJsonMixin):
@coroutine
def get(self, lattice_id, order):
"""Retrieve Lattice Element by Lattice ID and element order.
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
if remote_sudo and self.username != ROOT_ACCOUNT:
# TODO: Implement scp with remote sudo.
raise NotImplementedError('Cannot run scp with sudo!')
kwargs.setdefault('debug_level', self.debug_level)
# scp relies on 'scp' being in the $PATH of the non-interactive,
# SSH login shell.
scp_cmd = (['scp', '-P', str(self.port)] +
CompileSSHConnectSettings(ConnectTimeout=60) +
['-i', self.private_key])
if not self.interactive:
scp_cmd.append('-n')
if recursive:
scp_cmd.append('-r')
if verbose:
scp_cmd.append('-v')
if to_local:
scp_cmd += ['%s:%s' % (self.target_ssh_url, src), dest]
else:
scp_cmd += glob.glob(src) + ['%s:%s' % (self.target_ssh_url, dest)]
rc_func = cros_build_lib.RunCommand
if sudo:
rc_func = cros_build_lib.SudoRunCommand
return rc_func(scp_cmd, print_cmd=verbose, **kwargs)
def ScpToLocal(self, *args, **kwargs):
"""Scp a path from the remote device to the local machine."""
return self.Scp(*args, to_local=kwargs.pop('to_local', True), **kwargs)
def PipeToRemoteSh(self, producer_cmd, cmd, **kwargs):
"""Run a local command and pipe it to a remote sh command over ssh.
Args:
producer_cmd: Command to run locally with its results piped to |cmd|.
cmd: Command to run on the remote device.
**kwargs: See RemoteSh for documentation.
"""
result = cros_build_lib.RunCommand(producer_cmd, stdout_to_pipe=True,
print_cmd=False, capture_output=True)
return self.RemoteSh(cmd, input=kwargs.pop('input', result.output),
**kwargs)
class RemoteDeviceHandler(object):
"""A wrapper of RemoteDevice."""
def __init__(self, *args, **kwargs):
"""Creates a RemoteDevice object."""
self.device = RemoteDevice(*args, **kwargs)
def __enter__(self):
"""Return the temporary directory."""
return self.device
def __exit__(self, _type, _value, _traceback):
"""Cleans up the device."""
self.device.Cleanup()
class ChromiumOSDeviceHandler(object):
"""A wrapper of ChromiumOSDevice."""
def __init__(self, *args, **kwargs):
"""Creates a RemoteDevice object."""
self.device = ChromiumOSDevice(*args, **kwargs)
def __enter__(self):
"""Return the temporary directory."""
return self.device
def __exit__(self, _type, _value, _traceback):
"""Cleans up the device."""
self.device.Cleanup()
class RemoteDevice(object):
"""Handling basic SSH communication with a remote device."""
DEFAULT_BASE_DIR = '/tmp/remote-access'
def __init__(self, hostname, port=None, username=None,
base_dir=DEFAULT_BASE_DIR, connect_settings=None,
private_key=None, debug_level=logging.DEBUG, ping=True):
"""Initializes a RemoteDevice object.
Args:
hostname: The hostname of the device.
port: The ssh port of the device.
username: The ssh login username.
base_dir: The base directory of the working directory on the device.
connect_settings: Default SSH connection settings.
private_key: The identity file to pass to `ssh -i`.
debug_level: Setting debug level for logging.
ping: Whether to ping the device before attempting to connect.
"""
self.hostname = hostname
self.port = port
self.username = username
# The tempdir is for storing the rsa key and/or some temp files.
self.tempdir = osutils.TempDir(prefix='ssh-tmp')
self.connect_settings = (connect_settings if connect_settings else
CompileSSHConnectSettings())
self.private_key = private_key
self.agent = self._SetupSSH()
self.debug_level = debug_level
# Setup a working directory on the device.
self.base_dir = base_dir
if ping and not self.Pingable():
raise DeviceNotPingable('Device %s is not pingable.' % self.hostname)
# Do not call RunCommand here because we have not set up the work directory yet.
self.BaseRunCommand(['mkdir', '-p', self.base_dir])
self.work_dir = self.BaseRunCommand(
['mktemp', '-d', '--tmpdir=%s' % base_dir],
capture_output=True).output.strip()
logging.debug(
'The temporary working directory on the device is %s', self.work_dir)
self.cleanup_cmds = []
self.RegisterCleanupCmd(['rm', '-rf', self.work_dir])
def Pingable(self, timeout=20):
"""Returns True if the device is pingable.
Args:
timeout: Timeout in seconds (default: 20 seconds).
Returns:
True if the device responded to the ping before |timeout|.
"""
result = cros_build_lib.RunCommand(
['ping', '-c', '1', '-w', str(timeout), self.hostname],
error_code_ok=True,
capture_output=True)
return result.returncode == 0
def _SetupSSH(self):
"""Setup the ssh connection with device."""
return RemoteAccess(self.hostname, self.tempdir.tempdir, port=self.port,
username=self.username, private_key=self.private_key)
def _HasRsync(self):
"""Checks if rsync exists on the device."""
result = self.agent.RemoteSh(['PATH=%s:$PATH rsync' % DEV_BIN_PATHS,
'--version'], error_code_ok=True)
return result.returncode == 0
def RegisterCleanupCmd(self, cmd, **kwargs):
"""Register a cleanup command to be run on the device in Cleanup().
Args:
cmd: command to run. See RemoteAccess.RemoteSh documentation.
**kwargs: keyword arguments to pass along with cmd. See
RemoteAccess.RemoteSh documentation.
"""
self.cleanup_cmds.append((cmd, kwargs))
def Cleanup(self):
"""Remove work/temp directories and run all registered cleanup commands."""
for cmd, kwargs in self.cleanup_cmds:
# We want to run through all cleanup commands even if there are errors.
kwargs.setdefault('error_code_ok', True)
self.BaseRunCommand(cmd, **kwargs)
self.tempdir.Cleanup()
def CopyToDevice(self, src, dest, mode=None, **kwargs):
"""Copy path to device."""
msg = 'Could not copy %s to device.' % src
if mode is None:
# Use rsync by default if it exists.
mode = 'rsync' if self._HasRsync() else 'scp'
if mode == 'scp':
# scp always follows symlinks
kwargs.pop('follow_symlinks', None)
func = self.agent.Scp
else:
func = self.agent.Rsync
return RunCommandFuncWrapper(func, msg, src, dest, **kwargs)
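# Minimal usage sketch (hostname and paths are assumptions): rsync is
# preferred when available on the device, otherwise scp is used.
#   device = RemoteDevice('192.168.1.2')
#   device.CopyToDevice('/tmp/payload.tar', '/usr/local/payload.tar')
#   device.Cleanup()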
def CopyFromDevice(self, src, dest, mode=None, **kwargs):
"""Copy path from device."""
msg = 'Could not copy %s from device.' % src
if mode is None:
# Use rsync by default if it exists.
mode = 'rsync' if self._HasRsync() else 'scp'
if mode == 'scp':
# scp always follows symlinks
kwargs.pop('follow_symlinks', None)
func = self.agent.ScpToLocal
else:
func = self.agent.RsyncToLocal
return RunCommandFuncWrapper(func, msg, src, dest, **kwargs)
def CopyFromWorkDir(self, src, dest, **kwargs):
"""Copy path from working directory on the device."""
return self.CopyFromDevice(os.path.join(self.work_dir, src), dest, **kwargs)
def CopyToWorkDir(self, src, dest='', **kwargs):
"""Copy path to working directory on the device."""
return self.CopyToDevice(src, os.path.join(self.work_dir, dest), **kwargs)
def PipeOverSSH(self, filepath, cmd, **kwargs):
"""Cat a file and pipe over SSH."""
producer_cmd = ['cat', filepath]
return self.agent.PipeToRemoteSh(producer_cmd, cmd, **kwargs)
def Reboot(self):
"""Reboot the device."""
return self.agent.RemoteReboot()
def BaseRunCommand(self, cmd, **kwargs):
"""Executes a shell command on the device with output captured by default.
Args:
cmd: command to run. See RemoteAccess.RemoteSh documentation.
**kwargs: keyword arguments to pass along with cmd. See
RemoteAccess.RemoteSh documentation.
"""
kwargs.setdefault('debug_level', self.debug_level)
kwargs.setdefault('connect_settings', self.connect_settings)
try:
return self.agent.RemoteSh(cmd, **kwargs)
except SSHConnectionError:
logging.error('Error connecting to device %s', self.hostname)
raise
def RunCommand(self, cmd, **kwargs):
"""Executes a shell command on the device with output captured by default.
Also sets environment variables using dictionary provided by
keyword argument |extra_env|.
Args:
cmd: command to run. See RemoteAccess.RemoteSh documentation.
**kwargs: keyword arguments to pass along with cmd. See
RemoteAccess.RemoteSh documentation.
"""
new_cmd = cmd
# Handle setting environment variables on the device by copying
# and sourcing a temporary environment file.
extra_env = kwargs.pop('extra_env', None)
if extra_env:
env_list = ['export %s=%s' % (k, cros_build_lib.ShellQuote(v))
for k, v in extra_env.iteritems()]
remote_sudo = kwargs.pop('remote_sudo', False)
with tempfile.NamedTemporaryFile(dir=self.tempdir.tempdir,
prefix='env') as f:
logging.debug('Environment variables: %s', ' '.join(env_list))
osutils.WriteFile(f.name, '\n'.join(env_list))
self.CopyToWorkDir(f.name)
env_file = os.path.join(self.work_dir, os.path.basename(f.name))
new_cmd = ['.', '%s;' % env_file]
if remote_sudo and self.agent.username != ROOT_ACCOUNT:
new_cmd += ['sudo', '-E']
new_cmd += cmd
return self.BaseRunCommand(new_cmd, **kwargs)
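# Example of the extra_env path above (variable name and value are
# illustrative): the env file is copied to the device and sourced before
# the command, so
#   device.RunCommand(['printenv', 'FOO'], extra_env={'FOO': 'bar'})
# effectively runs `. <work_dir>/env...; printenv FOO` on the device.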
class ChromiumOSDevice(RemoteDevice):
"""Basic commands to interact with a ChromiumOS device over SSH connection."""
MAKE_DEV_SSD_BIN = '/usr/share/vboot/bin/make_dev_ssd.sh'
MOUNT_ROOTFS_RW_CMD = ['mount', '-o', 'remount,rw', '/']
LIST_MOUNTS_CMD = ['cat', '/proc/mounts']
GET_BOARD_CMD = ['grep', 'CHROMEOS_RELEASE_BOARD', '/etc/lsb-release']
def __init__(self, *args, **kwargs):
super(ChromiumOSDevice, self).__init__(*args, **kwargs)
self.board = self._LearnBoard()
self.path = self._GetPath()
def _GetPath(self):
"""Gets $PATH on the device and prepend it with DEV_BIN_PATHS."""
try:
result = self.BaseRunCommand(['echo', "${PATH}"])
except cros_build_lib.RunCommandError:
logging.warning('Error detecting $PATH on the device.')
raise
return '%s:%s' % (DEV_BIN_PATHS, result.output.strip())
def _RemountRootfsAsWritable(self):
"""Attempts to Remount the root partition."""
logging.info("Remounting '/' with rw...")
self.RunCommand(self.MOUNT_ROOTFS_RW_CMD, error_code_ok=True,
remote_sudo=True)
def _RootfsIsReadOnly(self):
"""Returns True if rootfs on is mounted as read-only."""
r = self.RunCommand(self.LIST_MOUNTS_CMD, capture_output=True)
for line in r.output.splitlines():
if not line:
continue
chunks = line.split()
if chunks[1] == '/' and 'ro' in chunks[3].split(','):
return True
return False
def DisableRootfsVerification(self):
"""Disables device rootfs verification."""
logging.info('Disabling rootfs verification on device...')
self.RunCommand(
[self.MAKE_DEV_SSD_BIN, '--remove_rootfs_verification', '--force'],
error_code_ok=True, remote_sudo=True)
# TODO(yjhong): Make sure an update is not pending.
logging.info('Need to reboot to actually disable the verification.')
self.Reboot()
def MountRootfsReadWrite(self):
"""Checks mount types and remounts them as read-write if needed.
Returns:
True if rootfs is mounted as read-write. False otherwise.
"""
if not self._RootfsIsReadOnly():
return True
# If the image on the device is built with rootfs verification
# disabled, we can simply remount '/' as read-write.
self._RemountRootfsAsWritable()
if not self._RootfsIsReadOnly():
return True
logging.info('Unable to remount rootfs as rw (normal w/verified rootfs).')
# If the image is built with rootfs verification, turn off the
# rootfs verification. After reboot, the rootfs will be mounted as
# read-write (there is no need to remount).
self.DisableRootfsVerification()
return not self._RootfsIsReadOnly()
def _LearnBoard(self):
"""Grab the board reported by the remote device.
In the case of multiple matches, uses the first one. In the case of no
entry or if the command failed, returns an empty string.
"""
try:
result = self.BaseRunCommand(self.GET_BOARD_CMD, capture_output=True)
except cros_build_lib.RunCommandError:
logging.warning('Error detecting the board.')
return ''
# In the case of multiple matches, use the first one.
output = result.output.splitlines()
if len(output) > 1:
"""
DSC 20 Homework 01
Name: <NAME> (failure to write name or pid will be penalized)
PID: A16679845
"""
# Question 1
def unlucky_number(numbers):
"""
# Takes in a list of numbers and adds the elements up.
# Returns True if '4' is in the calculated sum and False if not.
>>> unlucky_number([1,2,3,4])
False
>>> unlucky_number([1,2,3,4,4])
True
# Add at least 3 doctests below here #
>>> unlucky_number([3,3,3,3,3,3,3,3])
True
>>> unlucky_number([])
False
>>> unlucky_number([-4])
True
>>> unlucky_number([-4, 3])
False
"""
list_sum = 0
bad_number = '4'
for i in range(0, len(numbers)):
list_sum += numbers[i]
return bad_number in str(list_sum)
# Question 2
def pick_name(names):
"""
# Takes in a list of strings and returns the one with the least
# number of words.
>>> pick_name(["Hi, welcome to DSC20!", "Goodbye to DSC10!", \
"Get Ready To Work Hard!"])
'Goodbye to DSC10!'
>>> pick_name(["Start Early!", "Start Often!", "LET'S GO!"])
'Start Early!'
>>> pick_name(["Weiyue likes the Fire Spot"])
'Weiyue likes the Fire Spot'
# Add at least 3 doctests below here #
>>> pick_name(["This is even worse.", "This is sad."])
'This is sad.'
>>> pick_name([])
''
>>> pick_name(['', "Japanese ramen"])
''
"""
if len(names) == 0:
return ""
names_split = []
minimum = 1000000000
min_index = 0
for i in range(0, len(names)):
names_split.append(names[i].split())
if len(names_split[i]) < minimum:
minimum = len(names_split[i])
min_index = i
return names[min_index]
# Question 3
def replace_text(text, target_word, desired_word):
"""
# Takes in 3 arguments of text we are replacing in, a word to
# replace, and the word to replace it with. Replaces first
# instance of word only, and returns original text in all caps
# if the word to replace is not within the given text.
>>> replace_text("Dumplings is a very famous dish for the new year", \
"Dumplings", "🥟")
'🥟 is a very famous dish for the new year'
>>> replace_text("dumplings dumplings dumplings", "dumplings", "🥟")
'🥟 dumplings dumplings'
>>> replace_text("We all love DSC20", "Lie", "Truth")
'WE ALL LOVE DSC20'
>>> replace_text("Happy! new! Year!", "!", "🧧")
'Happy🧧 new! Year!'
# Add at least 3 doctests below here #
>>> replace_text('', 'DSC20', 'DSC30')
''
>>> replace_text('Dumpling soup', '', 'lets go')
'lets goDumpling soup'
>>> replace_text('abra', 'a', 'switch')
'switchbra'
"""
if target_word not in text:
return text.upper()
return text.replace(target_word, desired_word, 1)
# Question 4
def approved_recipe(recipe, day, threshold):
"""
# Takes in three arguments. First is a list containing
# ingredients and their weights, second is the day of the week,
# and the third is the threshold that the combined weights must
# be above. Day of the week affects the multiplier of the
# ingredients weight. For a recipe to be approved, it must contain
# the right ingredients and a weight above the threshold.
>>> approved_recipe([['msg', 10], ['rice', 20], ['egg', 30]], 'FRIDAY', 30)
'Fuiyoh'
>>> approved_recipe([['msg', 10], ['rice', 20], ['egg', 30]], 'friday', 31)
'Haiyah'
>>> approved_recipe([['soy sauce', 10], ['rice', 20], ['egg', 30]], \
'FRIDAY', 30)
'Haiyah'
# Add at least 3 doctests below here #
>>> approved_recipe([['msg', 10], ['rice', 20], ['egg', 30]], 'SuNdAY', 60)
'Fuiyoh'
>>> approved_recipe([['msg', 6], ['rice', 20], ['egg', 30]], 'Sunday', 60)
'Haiyah'
>>> approved_recipe([['a', 1], ['b', 2], ['c', 3]], 'Saturday', 0)
'Haiyah'
"""
weekends = ['saturday', 'sunday']
weights = 0
weight_multiplier = 0.5
ingredients = []
if day.lower() in weekends:
weight_multiplier = 1
for i in range(0, len(recipe)):
weights += (recipe[i][1] * weight_multiplier)
if weights < threshold:
return 'Haiyah'
for i in range(0, len(recipe)):
ingredients.append(recipe[i][0])
if ('msg' in ingredients) and ('egg' in ingredients) \
and ('rice' in ingredients):
return 'Fuiyoh'
return 'Haiyah'
# Question 5
def money_got(grades):
"""
# Argument is a list of strings representing grades. Sums up
# and returns the amount of money that should be received for
# each report card.
>>> money_got([])
'Gapped'
>>> money_got(["A+", 'A+', "A+", 'A', 'P'])
90
>>> money_got(["A+", "A+", "W"])
0
# Add at least 3 doctests below here #
>>> money_got(["a+", "w", "A+", "b-"])
8
>>> money_got(["p", "W", "A-"])
-170
>>> money_got(["a+", "a", "a-", "b+", "b", "b-"])
158
"""
if len(grades) == 0:
return 'Gapped'
money = 0
a_plus_value = 50
a_value = 40
a_minus_value = 30
b_plus_value = 20
b_value = 10
b_minus_value = 8
not_in_scale_value = -100
for i in range(0, len(grades)):
if grades[i].upper() == 'A+':
money += a_plus_value
elif grades[i].upper() == 'A':
money += a_value
elif grades[i].upper() == 'A-':
money += a_minus_value
elif grades[i].upper() == 'B+':
money += b_plus_value
elif grades[i].upper() == 'B':
money += b_value
elif grades[i].upper() == 'B-':
money += b_minus_value
else:
money += not_in_scale_value
return money
# Question 6
def number_bought(name, grades, product, price):
"""
# Takes in arguments of a person's name, grades, the product
# they want to buy, and the unit price of that product. Calculates
# the amount of spending money based on their grades to determine
# how many units of the product they can buy maximum.
>>> number_bought("Yi", ["A+", 'A+', "A+", 'A', 'P'], "milk tea", 5)
'Yi has bought 18 milk tea and has $0 left.'
>>> number_bought("Yi", ["A+", 'A+', "A+", 'A', 'P'], "milk tea", 5.2)
'System Error!'
>>> number_bought("Weiyue", ["S"], "Football", 200)
'Weiyue has bought 0 Football and has $-100 left.'
# Add at least 3 doctests below here #
>>> number_bought('William', ['A+', 'A', 'A-'], 'pc', 120)
'William has bought 1 pc and has $0 left.'
>>> number_bought('William', ['A+', 'A+', 'C'], 'dvd', 1)
'William has bought 0 dvd and has $0 left.'
>>> number_bought('', ['A+'], 'dvd', 3)
' has bought 16 dvd and has $2 left.'
"""
if type(price) is not int:
return 'System Error!'
spending_money = money_got(grades)
units_bought = spending_money // price
remaining_money = spending_money % price
if spending_money <= 0:
units_bought = 0
remaining_money = spending_money
return name + ' has bought ' + str(units_bought) + ' ' + str(product) + \
' and has $' + str(remaining_money) + ' left.'
# Question 7
def report(people, their_grades, products, prices):
"""
# Takes in lists of people's names, grades, the products
# they want to buy, and the unit prices of those products. Calculates
# each person's spending money based on their grades to determine
# the maximum number of units of the product they can buy.
>>> print(report(["Theo"], [["A+"]], ["iPad"], [1200]))
Theo has bought 0 iPad and has $50 left.
>>> print(report(["Yi", "Yi", "Weiyue", "Jianming"], \
[["A+", 'A+', "A+", 'A', 'P'], ["A+", 'A+', "A+", 'A', 'P'],\
["S"], ["A"]], ["milk tea", "MILK TEA", "Football", "Flowers"], \
[5,5.2,200,1]))
Yi has bought 18 milk tea and has $0 left.
Jianming has bought 40 Flowers and has $0 left.
System Error!
Weiyue has bought 0 Football and has $-100 left.
>>> print(report(["Yi", "Weiyue", "Jianming"], \
[["A+", 'A+', "A+", 'A', 'P'], \
["S"], ["A"]], ["milk tea", "Football", "Flowers"], \
[5,200,1]))
Yi has bought 18 milk tea and has $0 left.
Jianming has bought 40 Flowers and has $0 left.
Weiyue has bought 0 Football and has $-100 left.
# Add at least 3 doctests below here #
>>> print(report([''], ['A'], [''], ['']))
System Error!
>>> print(report(["Jim", "Joe", "Jack", "Jay", "Jin"], \
[["A+", 'A+', "A+", 'A', 'P'], \
["S"], ["A"], ["F"], ["A", "A+"]],\
["boba", "ball", "bic", "bank", "boar"], [5,200,1,1,3]))
Jim has bought 18 boba and has $0 left.
Jin has bought 30 boar and has $0 left.
Joe has bought 0 ball and has $-100 left.
Jay has bought 0 bank and has $-100 left.
Jack has bought 40 bic and has $0 left.
>>> print(report(["John"], [["A-"]], ["cubes"], [14]))
John has bought 2 cubes and has $2 left.
"""
full_report = ""
for_count = 0
back_count = len(people) - 1
while for_count < back_count:
full_report += number_bought(people[for_count], their_grades[for_count]
, products[for_count], prices[for_count]) + '\n'
full_report += number_bought(people[back_count],
their_grades[back_count], products[back_count], prices[back_count])
for_count += 1
back_count -= 1
if for_count <= back_count:
full_report += '\n'
if for_count == back_count:
full_report += number_bought(people[for_count],
their_grades[for_count], products[for_count], prices[for_count])
return full_report
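# The loop above assembles the report in an interleaved order: first person,
# last person, second, second-to-last, and so on, meeting in the middle.
# A minimal sketch of the index order produced for five people:
#
# order, f, b = [], 0, 4
# while f < b:
#     order += [f, b]
#     f, b = f + 1, b - 1
# if f == b:
#     order.append(f)
# # order == [0, 4, 1, 3, 2]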
# Question 8
def pick_best_shoes(selections, numbers):
"""
# Takes in a list of strings (shoe names) and a list
#***************#
ACCESS_LEVEL = 1
#***************#
ERROR_MESSAGE = ""
#Check our user's session and access level
if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
#We need to return a json list of all formtype RTYPES that match the provided formtype pk
if request.method == "POST":
#We only add queries to the user and nothing else
currentQueries = request.user.permissions.saved_queries
print >>sys.stderr, currentQueries
if currentQueries != "" and currentQueries != None:
currentQuery = json.loads(currentQueries)
currentQuery[request.POST['new_query_label']] = request.POST['new_query']
finishedQueryList = json.dumps(currentQuery)
request.user.permissions.saved_queries = finishedQueryList
request.user.permissions.save()
return HttpResponse(finishedQueryList, content_type="application/json" )
else:
newQuery = {}
newQuery[request.POST['new_query_label']] = request.POST['new_query']
newQuery = json.dumps(newQuery)
request.user.permissions.saved_queries = newQuery
request.user.permissions.save()
return HttpResponse(newQuery, content_type="application/json" )
ERROR_MESSAGE += "Error: You have not submitted through POST"
else: ERROR_MESSAGE += "Error: You do not have permission to access modifying user information"
#If anything goes wrong in the process, return an error in the json HTTP Response
SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META)
return HttpResponse('{"ERROR":"'+ ERROR_MESSAGE +'"}',content_type="application/json")
#-------------------------------------------------------------------------------------------------------
# MODEL QUERY ENDPOINTS
#=======================================================#
# ACCESS LEVEL : 1 GET_PROJECTS() *RECYCLING
#=======================================================#
def get_projects(self, request):
#***************#
ACCESS_LEVEL = 1
#***************#
#----------------------------------------------------------------------------------------------------------------------------
# This Endpoint returns a list of all projects. This is used mainly by the query engine
# --to figure out which rtypes to search by when a record reference type is chosen.
ERROR_MESSAGE = ""
#Check our user's session and access level
if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
#We need to return a json list of all formtype RTYPES that match the provided formtype pk
if request.method == "POST":
#let's get all the public projects, which may not include our own, so let's redundantly merge it and then call distinct()
publicProjects = FormProject.objects.filter(is_public=True)
userProject = FormProject.objects.filter(pk=request.user.permissions.project.pk)
if publicProjects.exists():
finalProjects = (publicProjects | userProject).distinct()
else:
finalProjects = userProject
finalJSON = {}
project_list = []
for aProject in finalProjects:
project_list.append({"name":aProject.name, "pk":aProject.pk})
finalJSON['project_list'] = project_list
finalJSON = json.dumps(finalJSON)
return HttpResponse(finalJSON, content_type="application/json" )
ERROR_MESSAGE += "Error: You have not submitted through POST"
else: ERROR_MESSAGE += "Error: You do not have permission to access modifying user information"
#If anything goes wrong in the process, return an error in the json HTTP Response
SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META)
return HttpResponse('{"ERROR":"'+ ERROR_MESSAGE +'"}',content_type="application/json")
#=======================================================#
# ACCESS LEVEL : 1 GET_FORMTYPES() *RECYCLING
#=======================================================#
def get_formtypes(self, request):
#***************#
ACCESS_LEVEL = 1
#***************#
#----------------------------------------------------------------------------------------------------------------------------
# This Endpoint returns a list of all formtypes for a provided project pk. This is used mainly by the query engine
# --to figure out which formtypes to add to a dropdown select when a project is chosen.
ERROR_MESSAGE = ""
#Check our user's session and access level
if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
#We need to return a json list of all formtype RTYPES that match the provided formtype pk
if request.method == "POST":
#Let's get all available public formtypes not in recycling--unless the formtypes are from the user's current project.
#If it is the user's current project, then don't use an is_public filter
print >>sys.stderr, request.POST['project_pk'] + " : "
if str(request.user.permissions.project.pk) == request.POST['project_pk']:
print >>sys.stderr, "What...?" + str(request.user.permissions.project.pk)
allFormTypes = FormType.objects.filter(project__pk=request.POST['project_pk'], flagged_for_deletion=False)
else:
allFormTypes = FormType.objects.filter(is_public=True, project__pk=request.POST['project_pk'], flagged_for_deletion=False)
if allFormTypes:
finalJSON = {}
formtype_list = []
for aFormType in allFormTypes:
formtype_list.append({"name":aFormType.form_type_name, "pk":aFormType.pk})
finalJSON['formtype_list'] = formtype_list
finalJSON = json.dumps(finalJSON)
return HttpResponse(finalJSON, content_type="application/json" )
else: ERROR_MESSAGE += "Error: no form types were found for this project"
else: ERROR_MESSAGE += "Error: You have not submitted through POST"
else: ERROR_MESSAGE += "Error: You do not have permission to access modifying user information"
#If anything goes wrong in the process, return an error in the json HTTP Response
SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META)
return HttpResponse('{"ERROR":"'+ ERROR_MESSAGE +'"}',content_type="application/json")
#=======================================================#
# ACCESS LEVEL : 1 GET_FORMTYPE_GEOSPATIAL_LAYERS() *RECYCLING
#=======================================================#
def get_formtype_geospatial_layers(self, request):
#***************#
ACCESS_LEVEL = 1
#***************#
#----------------------------------------------------------------------------------------------------------------------------
# This Endpoint returns a list of geoJSON 'geometry' layers to add to a openlayers map
ERROR_MESSAGE = ""
#Check our user's session and access level
if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
if request.method == "POST":
print >>sys.stderr, request.POST['formtype_pk'] + " : "
currentFormType = FormType.objects.get(pk=request.POST['formtype_pk'])
if request.user.permissions.project.pk == currentFormType.project.pk:
#geometry needs to be stored as a list of 'features'
allGeometry = {}
allGeometry['type'] = "FeatureCollection"
allGeometry['name'] = currentFormType.form_type_name
#allGeometry['crs'] = json.loads('{ "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::32638" } }')
featureList = []
allGeometry['features'] = featureList
allForms = currentFormType.form_set.all()
if allForms:
for aForm in allForms:
properties = {}
allFRATs = aForm.form_type.formrecordattributetype_set.all()
if allFRATs:
for FRAT in allFRATs:
properties[FRAT.record_type] = FormRecordAttributeValue.objects.get(record_attribute_type=FRAT, form_parent=aForm).record_value
feature = {}
feature['properties'] = properties
feature['type'] = "Feature"
feature['geometry'] = json.loads(aForm.form_geojson_string)
print >>sys.stderr, "Loaded Timer"
featureList.append(feature)
allGeometry = json.dumps(allGeometry)
return HttpResponse(allGeometry,content_type="application/json")
else: ERROR_MESSAGE += "You do not have permission to access this form type from another project"
else: ERROR_MESSAGE += "Error: You have not submitted through POST"
else: ERROR_MESSAGE += "Error: You do not have permission to access modifying user information"
#If anything goes wrong in the process, return an error in the json HTTP Response
SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META)
return HttpResponse('{"ERROR":"'+ ERROR_MESSAGE +'"}',content_type="application/json")
#=======================================================#
# ACCESS LEVEL : 1 GET_RTYPES *RECYCLING
#=======================================================#
def get_rtypes(self, request):
#***************#
ACCESS_LEVEL = 1
#***************#
#----------------------------------------------------------------------------------------------------------------------------
# This Endpoint returns a list of all rtypes for a provided formtype pk. This is used mainly by the query engine
# --to figure out which rtypes to add to a dropdown select when a formtype is chosen.
ERROR_MESSAGE = ""
#Check our user's session and access level
if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
#We need to return a json list of all formtype RTYPES that match the provided formtype pk
if request.method == "POST":
#Grab the formtype
currentFormType = FormType.objects.get(pk=request.POST['formtype_pk'])
#If the requested formtype isn't in the user's project and is flagged as inaccessible, then stop the request
if currentFormType.project.pk != request.user.permissions.project.pk and (currentFormType.flagged_for_deletion == True or currentFormType.is_public == False):
ERROR_MESSAGE += "Error: You are attempting to access records that don't exist. This probably occurred because your client attempted altering the POST data before sending"
#Otherwise we are in the clear so grab the list and return it
else:
finalJSON = {}
rtypeList = []
#Don't filter out the public flags if this formtype is the users project--if it's not then absolutely use the is_public flags
if currentFormType.project.pk == request.user.permissions.project.pk:
#***RECYCLING BIN*** Make sure that the returned FRAT AND FRRTS are filtered by their deletion flags. Don't want them returned in the query
for FRAT in currentFormType.formrecordattributetype_set.all().filter(flagged_for_deletion=False):
currentRTYPE = {}
currentRTYPE['label'] = FRAT.record_type
currentRTYPE['pk'] = FRAT.pk
currentRTYPE['rtype'] = 'FRAT'
rtypeList.append(currentRTYPE)
#***RECYCLING BIN*** Make sure that the returned FRAT AND FRRTS are filtered by their deletion flags. Don't want them returned in the query
for FRRT in currentFormType.ref_to_parent_formtype.all().filter(flagged_for_deletion=False):
currentRTYPE = {}
currentRTYPE['label'] = FRRT.record_type
currentRTYPE['pk'] = FRRT.pk
if FRRT.form_type_reference: currentRTYPE['ref_formtype_pk'] = FRRT.form_type_reference.pk
else: currentRTYPE['ref_formtype_pk'] = "None"
currentRTYPE['rtype'] = 'FRRT'
rtypeList.append(currentRTYPE)
else:
#***RECYCLING BIN*** Make sure that the returned FRAT AND FRRTS are filtered by their deletion flags. Don't want them returned in the query
for FRAT in currentFormType.formrecordattributetype_set.all().filter(flagged_for_deletion=False, is_public=True):
currentRTYPE = {}
currentRTYPE['label'] = FRAT.record_type
currentRTYPE['pk'] = FRAT.pk
currentRTYPE['rtype'] = 'FRAT'
rtypeList.append(currentRTYPE)
#***RECYCLING BIN*** Make sure that the returned FRAT AND FRRTS are filtered by their deletion flags. Don't want them returned in the query
for FRRT in currentFormType.ref_to_parent_formtype.all().filter(flagged_for_deletion=False, is_public=True):
currentRTYPE = {}
currentRTYPE['label'] = FRRT.record_type
currentRTYPE['pk'] = FRRT.pk
if FRRT.form_type_reference: currentRTYPE['ref_formtype_pk'] = FRRT.form_type_reference.pk
else: currentRTYPE['ref_formtype_pk'] = "None"
currentRTYPE['rtype'] = 'FRRT'
rtypeList.append(currentRTYPE)
#sort our rtype list by the label
rtypeList = sorted(rtypeList, key=lambda k: k['label'])
#Return the JSON response
finalJSON['rtype_list'] = rtypeList
finalJSON = json.dumps(finalJSON)
return HttpResponse(finalJSON, content_type="application/json" )
else: ERROR_MESSAGE += "Error: You have not submitted | |
# Copyright 2021 AUTHORS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the AUTHORS nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
from math import sqrt
import struct
import sys
from time import sleep
from bno055 import registers
from bno055.connectors.Connector import Connector
from bno055.params.NodeParameters import NodeParameters
from geometry_msgs.msg import Quaternion
from rclpy.node import Node
from rclpy.qos import QoSProfile
from sensor_msgs.msg import Imu, MagneticField, Temperature
from std_msgs.msg import String
class SensorService:
"""Provide an interface for accessing the sensor's features & data."""
def __init__(self, node: Node, connector: Connector, param: NodeParameters):
self.node = node
self.con = connector
self.param = param
prefix = self.param.ros_topic_prefix.value
QoSProf = QoSProfile(depth=10)
# create topic publishers:
self.pub_imu_raw = node.create_publisher(Imu, prefix + 'imu_raw', QoSProf)
self.pub_imu = node.create_publisher(Imu, prefix + 'imu', QoSProf)
self.pub_mag = node.create_publisher(MagneticField, prefix + 'mag', QoSProf)
self.pub_temp = node.create_publisher(Temperature, prefix + 'temp', QoSProf)
self.pub_calib_status = node.create_publisher(String, prefix + 'calib_status', QoSProf)
def configure(self):
"""Configure the IMU sensor hardware."""
self.node.get_logger().info('Configuring device...')
try:
data = self.con.receive(registers.BNO055_CHIP_ID_ADDR, 1)
if data[0] != registers.BNO055_ID:
raise IOError('Device ID=%s is incorrect' % data)
# print("device sent ", binascii.hexlify(data))
except Exception as e: # noqa: B902
# This is the first communication - exit if it does not work
self.node.get_logger().error('Communication error: %s' % e)
self.node.get_logger().error('Shutting down ROS node...')
sys.exit(1)
# IMU connected => apply IMU Configuration:
if not (self.con.transmit(registers.BNO055_OPR_MODE_ADDR, 1, bytes([registers.OPERATION_MODE_CONFIG]))):
self.node.get_logger().warn('Unable to set IMU into config mode.')
if not (self.con.transmit(registers.BNO055_PWR_MODE_ADDR, 1, bytes([registers.POWER_MODE_NORMAL]))):
self.node.get_logger().warn('Unable to set IMU normal power mode.')
if not (self.con.transmit(registers.BNO055_PAGE_ID_ADDR, 1, bytes([0x00]))):
self.node.get_logger().warn('Unable to set IMU register page 0.')
if not (self.con.transmit(registers.BNO055_SYS_TRIGGER_ADDR, 1, bytes([0x00]))):
self.node.get_logger().warn('Unable to start IMU.')
if not (self.con.transmit(registers.BNO055_UNIT_SEL_ADDR, 1, bytes([0x83]))):
self.node.get_logger().warn('Unable to set IMU units.')
# The sensor placement configuration (Axis remapping) defines the
# position and orientation of the sensor mount.
# See also Bosch BNO055 datasheet section Axis Remap
mount_positions = {
'P0': bytes(b'\x21\x04'),
'P1': bytes(b'\x24\x00'),
'P2': bytes(b'\x24\x06'),
'P3': bytes(b'\x21\x02'),
'P4': bytes(b'\x24\x03'),
'P5': bytes(b'\x21\x01'),
'P6': bytes(b'\x21\x07'),
'P7': bytes(b'\x24\x05')
}
if not (self.con.transmit(registers.BNO055_AXIS_MAP_CONFIG_ADDR, 2,
mount_positions[self.param.placement_axis_remap.value])):
self.node.get_logger().warn('Unable to set sensor placement configuration.')
# Show the current sensor offsets
print('Current sensor offsets:')
self.get_calib_offsets()
if (self.param.set_offsets):
configured_offsets = \
self.set_calib_offsets(
self.param.offset_acc,
self.param.offset_mag,
self.param.offset_gyr)
if (configured_offsets):
print('Successfully configured sensor offsets to:')
self.get_calib_offsets()
# Set Device to NDOF mode
# data fusion for gyroscope, acceleration sensor and magnetometer enabled
# absolute orientation
if not (self.con.transmit(registers.BNO055_OPR_MODE_ADDR, 1, bytes([registers.OPERATION_MODE_NDOF]))):
self.node.get_logger().warn('Unable to set IMU into NDOF operation mode.')
self.node.get_logger().info('Bosch BNO055 IMU configuration complete.')
def get_sensor_data(self):
"""Read IMU data from the sensor, parse and publish."""
# Initialize ROS msgs
imu_raw_msg = Imu()
imu_msg = Imu()
mag_msg = MagneticField()
temp_msg = Temperature()
# read from sensor
buf = self.con.receive(registers.BNO055_ACCEL_DATA_X_LSB_ADDR, 45)
# Publish raw data
# TODO: convert rcl Clock time to ros time?
# imu_raw_msg.header.stamp = node.get_clock().now()
imu_raw_msg.header.frame_id = self.param.frame_id.value
# TODO: do headers need sequence counters now?
# imu_raw_msg.header.seq = seq
# TODO: make this an option to publish?
imu_raw_msg.orientation_covariance = [
self.param.variance_orientation.value[0], 0.0 , 0.0,
0.0, self.param.variance_orientation.value[1], 0.0,
0.0, 0.0, self.param.variance_orientation.value[2]
]
imu_raw_msg.linear_acceleration.x = \
self.unpackBytesToFloat(buf[0], buf[1]) / self.param.acc_factor.value
imu_raw_msg.linear_acceleration.y = \
self.unpackBytesToFloat(buf[2], buf[3]) / self.param.acc_factor.value
imu_raw_msg.linear_acceleration.z = \
self.unpackBytesToFloat(buf[4], buf[5]) / self.param.acc_factor.value
imu_raw_msg.linear_acceleration_covariance = [
self.param.variance_acc.value[0], 0.0, 0.0,
0.0, self.param.variance_acc.value[1], 0.0,
0.0, 0.0, self.param.variance_acc.value[2]
]
imu_raw_msg.angular_velocity.x = \
self.unpackBytesToFloat(buf[12], buf[13]) / self.param.gyr_factor.value
imu_raw_msg.angular_velocity.y = \
self.unpackBytesToFloat(buf[14], buf[15]) / self.param.gyr_factor.value
imu_raw_msg.angular_velocity.z = \
self.unpackBytesToFloat(buf[16], buf[17]) / self.param.gyr_factor.value
imu_raw_msg.angular_velocity_covariance = [
self.param.variance_angular_vel.value[0], 0.0, 0.0,
0.0, self.param.variance_angular_vel.value[1], 0.0,
0.0, 0.0, self.param.variance_angular_vel.value[2]
]
# node.get_logger().info('Publishing imu message')
self.pub_imu_raw.publish(imu_raw_msg)
# TODO: make this an option to publish?
# Publish filtered data
# imu_msg.header.stamp = node.get_clock().now()
imu_msg.header.frame_id = self.param.frame_id.value
q = Quaternion()
# imu_msg.header.seq = seq
q.w = self.unpackBytesToFloat(buf[24], buf[25])
q.x = self.unpackBytesToFloat(buf[26], buf[27])
q.y = self.unpackBytesToFloat(buf[28], buf[29])
q.z = self.unpackBytesToFloat(buf[30], buf[31])
# TODO(flynneva): replace with standard normalize() function
# normalize
norm = sqrt(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w)
imu_msg.orientation.x = q.x / norm
imu_msg.orientation.y = q.y / norm
imu_msg.orientation.z = q.z / norm
imu_msg.orientation.w = q.w / norm
imu_msg.orientation_covariance = imu_raw_msg.orientation_covariance
imu_msg.linear_acceleration.x = \
self.unpackBytesToFloat(buf[32], buf[33]) / self.param.acc_factor.value
imu_msg.linear_acceleration.y = \
self.unpackBytesToFloat(buf[34], buf[35]) / self.param.acc_factor.value
imu_msg.linear_acceleration.z = \
self.unpackBytesToFloat(buf[36], buf[37]) / self.param.acc_factor.value
imu_msg.linear_acceleration_covariance = imu_raw_msg.linear_acceleration_covariance
imu_msg.angular_velocity.x = \
self.unpackBytesToFloat(buf[12], buf[13]) / self.param.gyr_factor.value
imu_msg.angular_velocity.y = \
self.unpackBytesToFloat(buf[14], buf[15]) / self.param.gyr_factor.value
imu_msg.angular_velocity.z = \
self.unpackBytesToFloat(buf[16], buf[17]) / self.param.gyr_factor.value
imu_msg.angular_velocity_covariance = imu_raw_msg.angular_velocity_covariance
self.pub_imu.publish(imu_msg)
# Publish magnetometer data
# mag_msg.header.stamp = node.get_clock().now()
mag_msg.header.frame_id = self.param.frame_id.value
# mag_msg.header.seq = seq
mag_msg.magnetic_field.x = \
self.unpackBytesToFloat(buf[6], buf[7]) / self.param.mag_factor.value
mag_msg.magnetic_field.y = \
self.unpackBytesToFloat(buf[8], buf[9]) / self.param.mag_factor.value
mag_msg.magnetic_field.z = \
self.unpackBytesToFloat(buf[10], buf[11]) / self.param.mag_factor.value
mag_msg.magnetic_field_covariance = [
self.param.variance_mag.value[0], 0.0, 0.0,
0.0, self.param.variance_mag.value[1], 0.0,
0.0, 0.0, self.param.variance_mag.value[2]
]
self.pub_mag.publish(mag_msg)
# Publish temperature
# temp_msg.header.stamp = node.get_clock().now()
temp_msg.header.frame_id = self.param.frame_id.value
# temp_msg.header.seq = seq
temp_msg.temperature = float(buf[44])
self.pub_temp.publish(temp_msg)
def get_calib_status(self):
"""
Read calibration status for sys/gyro/acc/mag.
Quality scale: 0 = bad, 3 = best
"""
calib_status = self.con.receive(registers.BNO055_CALIB_STAT_ADDR, 1)
sys = (calib_status[0] >> 6) & 0x03
gyro = (calib_status[0] >> 4) & 0x03
accel = (calib_status[0] >> 2) & 0x03
mag = calib_status[0] & 0x03
# Create dictionary (map) and convert it to JSON string:
calib_status_dict = {'sys': sys, 'gyro': gyro, 'accel': accel, 'mag': mag}
calib_status_str = String()
calib_status_str.data = json.dumps(calib_status_dict)
# Publish via ROS topic:
self.pub_calib_status.publish(calib_status_str)
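# The published String payload then looks like (illustrative values):
# '{"sys": 3, "gyro": 3, "accel": 1, "mag": 2}'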
def get_calib_offsets(self):
"""Read all calibration offsets and print to screen."""
accel_offset_read = self.con.receive(registers.ACCEL_OFFSET_X_LSB_ADDR, 6)
# Combine MSB and LSB registers into one decimal per axis
accel_offset_read_x = (accel_offset_read[1] << 8) | accel_offset_read[0]
accel_offset_read_y = (accel_offset_read[3] << 8) | accel_offset_read[2]
accel_offset_read_z = (accel_offset_read[5] << 8) | accel_offset_read[4]
mag_offset_read = self.con.receive(registers.MAG_OFFSET_X_LSB_ADDR, 6)
# Combine MSB and LSB registers into one decimal per axis
mag_offset_read_x = (mag_offset_read[1] << 8) | mag_offset_read[0]
mag_offset_read_y = (mag_offset_read[3] << 8) | mag_offset_read[2]
mag_offset_read_z = (mag_offset_read[5] << 8) | mag_offset_read[4]
gyro_offset_read = self.con.receive(registers.GYRO_OFFSET_X_LSB_ADDR, 6)
# Combine MSB and LSB registers into one decimal per axis
gyro_offset_read_x = (gyro_offset_read[1] << 8) | gyro_offset_read[0]
gyro_offset_read_y = (gyro_offset_read[3] << 8) | gyro_offset_read[2]
gyro_offset_read_z = (gyro_offset_read[5] << 8) | gyro_offset_read[4]
self.node.get_logger().info(
'\tAccel offsets (x y z): %d %d %d' % (
accel_offset_read_x,
accel_offset_read_y,
accel_offset_read_z))
self.node.get_logger().info(
'\tMag offsets (x y z): %d %d %d' % (
mag_offset_read_x,
mag_offset_read_y,
mag_offset_read_z))
self.node.get_logger().info(
'\tGyro offsets (x y z): %d %d %d' % (
gyro_offset_read_x,
gyro_offset_read_y,
gyro_offset_read_z))
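# Note: the combined reads above are raw unsigned 16-bit values, while the
# BNO055 stores offsets as signed 16-bit. A conversion sketch, in case signed
# values are needed (an assumption -- this driver logs the raw values as-is):
#
# def to_signed_16(value):
#     return value - 0x10000 if value & 0x8000 else value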
def set_calib_offsets(self, acc_offset, mag_offset, gyr_offset):
"""
Write calibration data (defined as 16-bit signed hex values).
:param acc_offset:
:param mag_offset:
:param gyr_offset:
"""
# Must switch to config mode to write out
if not (self.con.transmit(registers.BNO055_OPR_MODE_ADDR, 1, bytes([registers.OPERATION_MODE_CONFIG]))):
self.node.get_logger().error('Unable to set IMU into config mode')
sleep(0.025)
# Seems to only work when
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pytest
import networkx as nx
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from nncf.common.graph.transformations.commands import TargetType
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.graph.transformations.commands import TransformationType
from nncf.common.quantization.structs import QuantizerConfig
from nncf.common.quantization.structs import QuantizationMode
from nncf.tensorflow.graph.converter import TFModelConverterFactory
from nncf.tensorflow.graph.model_transformer import TFModelTransformer
from nncf.tensorflow.graph.transformations import commands
from nncf.tensorflow.graph.transformations.commands import TFInsertionCommand
from nncf.tensorflow.graph.transformations.commands import TFLayer
from nncf.tensorflow.graph.transformations.commands import TFLayerWeight
from nncf.tensorflow.graph.transformations.commands import TFMultipleInsertionCommands
from nncf.tensorflow.graph.transformations.layout import TFTransformationLayout
from nncf.tensorflow.graph.utils import is_functional_model
from nncf.tensorflow.layers.custom_objects import NNCF_CUSTOM_OBJECTS
from nncf.tensorflow.layers.operation import NNCFOperation
from nncf.tensorflow.quantization.layers import FakeQuantize
from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec
from tests.tensorflow.test_compressed_graph import keras_model_to_tf_graph
from tests.tensorflow.test_compressed_graph import check_nx_graph
from tests.tensorflow.test_compressed_graph import get_nx_graph_from_tf_graph
def test_insertion_commands_union_invalid_input():
cmd_0 = commands.TFInsertionCommand(commands.TFBeforeLayer('layer_0'))
cmd_1 = commands.TFInsertionCommand(commands.TFAfterLayer('layer_0'))
with pytest.raises(Exception):
cmd_0.union(cmd_1)
priority_types = ["same", "different"]
@pytest.mark.parametrize("case", priority_types, ids=priority_types)
def test_insertion_command_priority(case):
def make_operation_fn(priority_value):
def operation_fn():
return priority_value
return operation_fn
cmds = []
if case == 'same':
for idx in range(3):
cmds.append(
commands.TFInsertionCommand(
commands.TFBeforeLayer('layer_0'),
make_operation_fn(idx)
))
else:
priorities = sorted(list(TransformationPriority), key=lambda x: x.value, reverse=True)
for priority in priorities:
cmds.append(
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
make_operation_fn(priority.value),
priority
))
res_cmd = cmds[0]
for cmd in cmds[1:]:
res_cmd = res_cmd + cmd
res = res_cmd.insertion_objects
assert len(res) == len(cmds)
assert all(res[i]() <= res[i + 1]() for i in range(len(res) - 1))
def test_union_with_instance_idx_priority():
cmd = commands.TFAfterLayer('layer_0')
callable_object = lambda: 'insert_after'
cmd_0 = commands.TFInsertionCommand(cmd, callable_object, callable_object_instance_idx=0,
priority=TransformationPriority.QUANTIZATION_PRIORITY)
cmd_1 = commands.TFInsertionCommand(cmd, callable_object, callable_object_instance_idx=1,
priority=TransformationPriority.DEFAULT_PRIORITY)
cmd_union = cmd_0.union(cmd_1)
res = cmd_union.insertion_objects
assert len(res) == 2
assert res[0].callable == res[1].callable
assert res[0].instance_idx == 1
assert res[1].instance_idx == 0
def test_removal_command_union():
cmd_0 = commands.TFRemovalCommand(commands.TFLayer('layer_0'))
cmd_1 = commands.TFRemovalCommand(commands.TFLayer('layer_1'))
with pytest.raises(Exception):
cmd_0.union(cmd_1)
def test_add_insertion_command_to_multiple_insertion_commands_same():
check_fn = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name
cmd_0 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0')
cmd_1 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_1')
m_cmd = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn
)
m_cmd.add_insertion_command(cmd_0)
m_cmd.add_insertion_command(cmd_1)
res_cmds = m_cmd.commands
assert len(res_cmds) == 1
res = res_cmds[0].insertion_objects
assert len(res) == 2
assert res[0]() == 'cmd_0'
assert res[1]() == 'cmd_1'
def test_add_insertion_command_to_multiple_insertion_commands_different():
check_fn = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name
cmd_0 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0')
cmd_1 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_1'),
lambda: 'cmd_1')
m_cmd = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn
)
m_cmd.add_insertion_command(cmd_0)
m_cmd.add_insertion_command(cmd_1)
res_cmds = m_cmd.commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_0'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_1'
def test_add_insertion_command_to_multiple_insertion_commands_invalid_input():
m_cmd = commands.TFMultipleInsertionCommands(commands.TFLayerWeight('layer_0', 'weights_0'))
cmd = commands.TFRemovalCommand(commands.TFLayer('layer_0'))
with pytest.raises(Exception):
m_cmd.add_insertion_command(cmd)
def test_multiple_insertion_commands_union_invalid_input():
cmd_0 = commands.TFMultipleInsertionCommands(commands.TFLayer('layer_0'))
cmd_1 = commands.TFMultipleInsertionCommands(commands.TFLayer('layer_1'))
with pytest.raises(Exception):
cmd_0.add_insertion_command(cmd_1)
def test_multiple_insertion_commands_union():
check_fn_0 = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name and \
dst.weights_attr_name == 'weight_0'
cmd_0 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0')
m_cmd_0 = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn_0,
commands=[cmd_0]
)
check_fn_1 = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name and \
dst.weights_attr_name == 'weight_1'
cmd_1 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_1'),
lambda: 'cmd_1')
m_cmd_1 = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn_1,
commands=[cmd_1]
)
m_cmd = m_cmd_0 + m_cmd_1
res_cmds = m_cmd.commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_0'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_1'
def test_transformation_layout_insertion_case():
transformation_layout = TFTransformationLayout()
check_fn = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name
command_list = [
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_1'),
lambda: 'cmd_1',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_1', 'weight_0'),
lambda: 'cmd_2',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn,
commands=[
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_3',
TransformationPriority.PRUNING_PRIORITY)
]),
commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_1'),
check_target_points_fn=check_fn,
commands=[
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_1', 'weight_0'),
lambda: 'cmd_4',
TransformationPriority.PRUNING_PRIORITY),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_1', 'weight_1'),
lambda: 'cmd_5',
TransformationPriority.PRUNING_PRIORITY)
]),
]
for cmd in command_list:
transformation_layout.register(cmd)
res_transformations = transformation_layout.transformations
assert len(res_transformations) == 2
assert res_transformations[0].type == TransformationType.MULTI_INSERT
assert res_transformations[0].target_point.type == TargetType.LAYER
assert res_transformations[0].target_point.layer_name == 'layer_0'
assert res_transformations[1].type == TransformationType.MULTI_INSERT
assert res_transformations[1].target_point.type == TargetType.LAYER
assert res_transformations[1].target_point.layer_name == 'layer_1'
res_cmds = res_transformations[0].commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 2
assert res[0]() == 'cmd_3' and res[1]() == 'cmd_0'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_1'
res_cmds = res_transformations[1].commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 2
assert res[0]() == 'cmd_4' and res[1]() == 'cmd_2'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_5'
def test_transformation_layout_removal_case():
transformation_layout = TFTransformationLayout()
command_list = [
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'sparsity_operation',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFRemovalCommand(commands.TFOperationWithWeights('layer_0', 'weight_0', 'sparsity_operation')),
commands.TFInsertionCommand(
commands.TFAfterLayer('layer_0'),
lambda: 'layer_1'
),
commands.TFRemovalCommand(commands.TFLayer('layer_1')),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'pruning_operation',
TransformationPriority.PRUNING_PRIORITY
)
]
for cmd in command_list:
transformation_layout.register(cmd)
res_transformations = transformation_layout.transformations
assert len(res_transformations) == 5
assert res_transformations[0].type == TransformationType.INSERT
assert res_transformations[0].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
assert res_transformations[0].target_point.layer_name == 'layer_0'
assert res_transformations[0].target_point.weights_attr_name == 'weight_0'
assert res_transformations[1].type == TransformationType.REMOVE
assert res_transformations[1].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
assert res_transformations[1].target_point.layer_name == 'layer_0'
assert res_transformations[1].target_point.weights_attr_name == 'weight_0'
assert res_transformations[1].target_point.operation_name == 'sparsity_operation'
assert res_transformations[2].type == TransformationType.INSERT
assert res_transformations[2].target_point.type == TargetType.AFTER_LAYER
assert res_transformations[2].target_point.layer_name == 'layer_0'
assert res_transformations[3].type == TransformationType.REMOVE
assert res_transformations[3].target_point.type == TargetType.LAYER
assert res_transformations[3].target_point.layer_name == 'layer_1'
assert res_transformations[4].type == TransformationType.INSERT
assert res_transformations[4].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
assert res_transformations[4].target_point.layer_name == 'layer_0'
assert res_transformations[4].target_point.weights_attr_name == 'weight_0'
CUSTOM_LAYER_NAME = "custom_layer_for_test"
class TwoWeightCustomLayerForTest(tf.keras.layers.Layer):
WEIGHT_1_NAME = 'w1'
WEIGHT_2_NAME = 'w2'
def __init__(self, name=CUSTOM_LAYER_NAME, trainable=True, dtype='float32'):
super().__init__(name=name, trainable=trainable, dtype=dtype)
self.w1 = self.add_weight(shape=(3, 1, 1, 3), name=self.WEIGHT_1_NAME)
self.w2 = self.add_weight(shape=(3, 1, 1, 3), name=self.WEIGHT_2_NAME)
def call(self, inputs, **kwargs):
x = tf.nn.conv2d(inputs, self.w1, strides=[1, 1, 1, 1], padding='SAME')
x = tf.nn.conv2d(x, self.w2, strides=[1, 1, 1, 1], padding='SAME')
return x
def ModelWithTwoWeightCustomLayer():
input_shape = (None, None, 3)
img_input = layers.Input(name='input', shape=input_shape)
x = img_input
x = TwoWeightCustomLayerForTest()(x) # custom!
model = models.Model(img_input, x, name='ModelForCustomLayerTest')
model.build([16, 16, 3])
return model
def create_transformed_model(transformation_layout: TFTransformationLayout):
model = ModelWithTwoWeightCustomLayer()
transformer = TFModelTransformer(model)
model = transformer.transform(transformation_layout)
return model
@NNCF_CUSTOM_OBJECTS.register()
class MockIdentityOp(NNCFOperation):
def build(self, input_shape, input_type, name, layer):
return {}
def call(self, inputs, weights, _):
return inputs
def test_multiple_insertion_command_has_same_effect_as_multiple_single_insertions():
check_fn = lambda src, dst: dst.type == TargetType.OPERATION_WITH_WEIGHTS
insertion_command_1 = TFInsertionCommand(
TFLayerWeight(CUSTOM_LAYER_NAME,
TwoWeightCustomLayerForTest.WEIGHT_1_NAME),
MockIdentityOp('mock_nncf_op_1'),
TransformationPriority.PRUNING_PRIORITY)
insertion_command_2 = TFInsertionCommand(
TFLayerWeight(CUSTOM_LAYER_NAME,
TwoWeightCustomLayerForTest.WEIGHT_2_NAME),
MockIdentityOp('mock_nncf_op_2'),
TransformationPriority.PRUNING_PRIORITY)
multiple_insertion_command = TFMultipleInsertionCommands(
target_point=TFLayer(CUSTOM_LAYER_NAME),
commands=[insertion_command_1, insertion_command_2],
check_target_points_fn=check_fn)
transformation_layout_multi = TFTransformationLayout()
transformation_layout_multi.register(multiple_insertion_command)
transformation_layout_two_single = TFTransformationLayout()
transformation_layout_two_single.register(insertion_command_1)
transformation_layout_two_single.register(insertion_command_2)
model_with_multi = create_transformed_model(transformation_layout_multi)
model_with_two_single = create_transformed_model(transformation_layout_two_single)
multi_config = model_with_multi.get_config()
two_single_config = model_with_two_single.get_config()
assert multi_config == two_single_config
def create_functional_model():
img_input = layers.Input(name='input', shape=(None, None, 3), dtype='float32')
x = layers.Conv2D(filters=16,
kernel_size=(3, 3),
strides=1,
padding="same",
activation='relu')(img_input)
residual = layers.Conv2D(filters=64,
kernel_size=(1, 1),
strides=2)(x)
residual = layers.BatchNormalization()(residual)
x = layers.Conv2D(filters=64,
kernel_size=(3, 3),
strides=2,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(filters=64,
kernel_size=(3, 3),
strides=1,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = tf.keras.layers.Add()([residual, x])
x = layers.Dense(units=10, activation='softmax')(x)
model = models.Model(img_input, x, name='ResnetBlockTest')
return model
def create_sequential_model():
model = tf.keras.Sequential()
model.add(layers.Input(shape=(None, None, 3)))
model.add(layers.Conv2D(filters=64,
kernel_size=(1, 1),
strides=2,
activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dense(2, activation="relu"))
return model
def apply_insert_after(model):
converter = TFModelConverterFactory.create(model)
transformations = TFTransformationLayout()
qconfig = QuantizerConfig(num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signedness_to_force=None,
per_channel=False)
functional_model = is_functional_model(model)
for i, layer in enumerate(model.layers):
original_node_name = layer.name
if functional_model:
_, layer_info = converter.get_layer_info_for_node(original_node_name)
instance_idx = layer_info.instance_idx
else:
instance_idx = 0
fake_quantize_name = f'FakeQuantize_{i}/{original_node_name}'
fake_quantize_layer = FakeQuantize(
TFQuantizerSpec.from_config(qconfig, narrow_range=False, half_range=False),
name=fake_quantize_name)
transformations.register(
TFInsertionCommand(
target_point=commands.TFAfterLayer(original_node_name,
instance_idx=instance_idx,
output_port_id=0),
callable_object=fake_quantize_layer,
priority=TransformationPriority.QUANTIZATION_PRIORITY))
transformer = TFModelTransformer(model)
transformed_model = transformer.transform(transformations)
return transformed_model
def apply_insert_before(model):
converter = TFModelConverterFactory.create(model)
transformations = TFTransformationLayout()
qconfig = QuantizerConfig(num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signedness_to_force=None,
per_channel=False)
functional_model = is_functional_model(model)
for i, layer in enumerate(model.layers):
# Insertion before input layer is not supported
if isinstance(layer, layers.InputLayer):
continue
original_node_name = layer.name
if functional_model:
_, layer_info = converter.get_layer_info_for_node(original_node_name)
instance_idx = layer_info.instance_idx
else:
instance_idx = 0
inputs = [layer.input] if isinstance(layer.input, tf.Tensor) else layer.input
for port, _ in enumerate(inputs):
fake_quantize_name = f'FakeQuantize_{i}.{port}/{original_node_name}'
fake_quantize_layer = FakeQuantize(
TFQuantizerSpec.from_config(qconfig, narrow_range=False, half_range=False),
name=fake_quantize_name)
transformations.register(
TFInsertionCommand(
target_point=commands.TFBeforeLayer(original_node_name,
instance_idx=instance_idx,
input_port_id=port),
callable_object=fake_quantize_layer,
priority=TransformationPriority.QUANTIZATION_PRIORITY))
transformer = TFModelTransformer(model)
transformed_model = transformer.transform(transformations)
return transformed_model
def check_graphs(model, ref_graph_filename):
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'model_transormer')
ref_graph_path = os.path.abspath(os.path.join(data_dir, ref_graph_filename))
graph, graph_to_layer_var_names_map = keras_model_to_tf_graph(model)
nx_graph = get_nx_graph_from_tf_graph(graph, graph_to_layer_var_names_map)
if not os.path.exists(ref_graph_path) and os.getenv("NNCF_TEST_REGEN_DOT") is not None:
nx.drawing.nx_pydot.write_dot(nx_graph, ref_graph_path)
check_nx_graph(nx_graph, ref_graph_path)
def test_functional_insert_after():
model = create_functional_model()
transformed_model = apply_insert_after(model)
check_graphs(transformed_model, 'functional_insert_after.dot')
def test_functional_insert_before():
model = create_functional_model()
transformed_model = apply_insert_before(model)
check_graphs(transformed_model, 'functional_insert_before.dot')
def | |
"""tb.py reads, searches and displays trees from Penn Treebank (PTB) format
treebank files.
<NAME>, 14th January, 2012, last modified 15th November 2018
Trees are represented in Python as nested list structures in the following
format:
Terminal nodes are represented by strings.
Nonterminal nodes are represented by lists. The first element of
the list is the node's label (a string), and the remaining elements
of the list are lists representing the node's children.
This module also defines two regular expressions.
nonterm_rex matches Penn treebank nonterminal labels, and parses them into
their various parts.
empty_rex matches empty elements (terminals), and parses them into their
various parts.
"""
import collections, glob, re, sys
PTB_base_dir = "/usr/local/data/LDC/LDC2015T13_eng_news_txt_tbnk-ptb_revised/" # read PTB from here
_header_re = re.compile(r"(\*x\*.*\*x\*[ \t]*\n)*\s*")
_openpar_re = re.compile(r"\s*\(\s*([^ \t\n\r\f\v()]*)\s*")
_closepar_re = re.compile(r"\s*\)\s*")
_terminal_re = re.compile(r"\s*([^ \t\n\r\f\v()]*)\s*")
# This is such a complicated regular expression that I use the special
# "verbose" form of regular expressions, which lets me index and document it
#
nonterm_rex = re.compile(r"""
^(?P<CAT>[A-Z0-9$|^]+) # category comes first
(?: # huge disjunct of optional annotations
- (?:(?P<FORMFUN>ADV|NOM) # stuff beginning with -
|(?P<GROLE>DTV|LGS|PRD|PUT|SBJ|TPC|VOC)
|(?P<ADV>BNF|DIR|EXT|LOC|MNR|PRP|TMP)
|(?P<MISC>CLR|CLF|HLN|SEZ|TTL)
|(?P<TPC>TPC)
|(?P<DYS>UNF|ETC|IMP)
|(?P<INDEX>[0-9]+)
)
| = (?P<EQINDEX>[0-9]+) # stuff beginning with =
)* # Kleene star
$""", re.VERBOSE)
primarycategory_rex = re.compile(r"""^[\^]?([A-Z0-9$]+)(?:$|[-|^=])""") # Group 1 matches primary category in node label
empty_rex = re.compile(r"^(?P<CAT>[A-Z0-9\?\*]+)(?:-(?P<INDEX>\d+))")
def read_file(filename):
"""Returns the trees in the PTB file filename."""
filecontents = open(filename, "r").read()
pos = _header_re.match(filecontents).end()
trees = []
_string_trees(trees, filecontents, pos)
return trees
def string_trees(s):
"""Returns a list of the trees in PTB-format string s"""
trees = []
_string_trees(trees, s)
return trees
def _string_trees(trees, s, pos=0):
"""Reads a sequence of trees in string s[pos:].
Appends the trees to the argument trees.
Returns the ending position of those trees in s."""
while pos < len(s):
closepar_mo = _closepar_re.match(s, pos)
if closepar_mo:
return closepar_mo.end()
openpar_mo = _openpar_re.match(s, pos)
if openpar_mo:
tree = [openpar_mo.group(1)]
trees.append(tree)
pos = _string_trees(tree, s, openpar_mo.end())
else:
terminal_mo = _terminal_re.match(s, pos)
trees.append(terminal_mo.group(1))
pos = terminal_mo.end()
return pos
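# Usage sketch for the reader above:
# >>> string_trees("(S (NP I) (VP ran))")
# [['S', ['NP', 'I'], ['VP', 'ran']]]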
def make_nonterminal(label, children):
"""returns a tree node with root node label and children"""
return [label]+children
def make_terminal(word):
"""returns a terminal tree node with label word"""
return word
def make_preterminal(label, word):
"""returns a preterminal node with label for word"""
return [label, word]
def is_terminal(subtree):
"""True if this subtree consists of a single terminal node
(i.e., a word or an empty node)."""
return not isinstance(subtree, list)
def is_nonterminal(subtree):
"""True if this subtree does not consist of a single terminal node
(i.e., a word or an empty node)."""
return isinstance(subtree, list)
def is_preterminal(subtree):
"""True if the treebank subtree is rooted in a preterminal node
(i.e., is an empty node or dominates a word)."""
return isinstance(subtree, list) and len(subtree) == 2 and is_terminal(subtree[1])
def is_phrasal(subtree):
"""True if this treebank subtree is not a terminal or a preterminal node."""
return isinstance(subtree, list) and \
(len(subtree) == 1 or isinstance(subtree[1], list))
_empty_cats = ("-NONE-","-DFL-")
def is_empty(subtree):
"""True if this subtree is a preterminal node dominating an empty node"""
return is_preterminal(subtree) and tree_category(subtree) in _empty_cats
_punctuation_cats = ("''",":","#",",",".","``","-LRB-","-RRB-")+_empty_cats
def is_punctuation(subtree):
"""True if this subtree is a preterminal node dominating a punctuation or
empty node."""
return is_preterminal(subtree) and tree_category(subtree) in _punctuation_cats
_partial_word_rex = re.compile(r"^[a-zA-Z]+[-]$") # matches non-punctuation words that end in "-"
def is_partial_word(subtree):
"""True if this subtree is a preterminal node dominating a partial word."""
if is_preterminal(subtree):
term = subtree[1]
if (_partial_word_rex.match(term) or
term == "MUMBLEx" or
tree_category(subtree) == "XX"):
return True
return False
def tree_children(tree):
"""Returns the children subtrees of tree"""
if isinstance(tree, list):
return tree[1:]
else:
return []
def tree_label(tree):
"""Returns the label on the root node of tree."""
if isinstance(tree, list):
return tree[0]
else:
return tree
def label_category(label):
"""Returns the category part of a node label."""
nonterm_mo = nonterm_rex.match(label)
if nonterm_mo:
return nonterm_mo.group('CAT')
else:
return label
def label_primarycategory(label):
"""Returns the primary category part of a node label."""
primary_mo = primarycategory_rex.match(label)
if primary_mo:
return primary_mo.group(1)
else:
return label
def tree_category(tree):
"""Returns the category of the root node of tree."""
if isinstance(tree, list):
return label_category(tree[0])
else:
return tree
def tree_primarycategory(tree):
"""Returns the primary category of the root node of tree."""
if isinstance(tree, list):
return label_primarycategory(tree[0])
else:
return tree
def map_labels(tree, fn):
"""Returns a tree in which every node's label is mapped by fn"""
if isinstance(tree, list):
return [fn(tree[0])]+[map_labels(child,fn) for child in tree[1:]]
else:
return tree
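# e.g. stripping annotations from every nonterminal label (sketch):
# map_labels(['NP-SBJ', ['DT', 'the'], ['NN', 'dog']], label_category)
# => ['NP', ['DT', 'the'], ['NN', 'dog']]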
def map_subtrees(tree, fn):
"""Returns a tree in which every subtree is mapped by fn.
fn() is called on each subtree of tree after all of its children
have been mapped.
"""
if isinstance(tree, list):
return fn([map_subtrees(child, fn) if i > 0 else child
for i, child in enumerate(tree)])
else:
return fn(tree)
def label_noindices(label):
"""Removes indices in label if present"""
label_mo = nonterm_rex.match(label)
if label_mo:
start = max(label_mo.end('INDEX'), label_mo.end('EQINDEX'))
if start > 1:
return label[:start-2]
return label
def tree_children(tree):
"""Returns a list of the subtrees of tree."""
if isinstance(tree, list):
return tree[1:]
else:
return []
def tree_copy(tree):
"""Returns a deep copy of tree"""
if isinstance(tree, list):
return [tree_copy(child) for child in tree]
else:
return tree
def prune(tree, remove_empty=False,
remove_partial=False,
remove_punctuation=False,
collapse_unary=False,
binarise=False,
relabel=lambda x: x):
"""Returns a copy of tree without empty nodes, unary nodes or node indices.
If binarise=='right' then right-binarise nodes, otherwise
if binarise is not False then left-binarise nodes.
"""
def left_binarise(cs, rightpos):
label = '.'.join(tree_label(cs[i]) for i in range(rightpos))
if rightpos <= 2:
return make_nonterminal(label, cs[:rightpos])
else:
return make_nonterminal(label, [left_binarise(cs, rightpos-1),cs[rightpos-1]])
def right_binarise(cs, leftpos, len_cs):
label = '.'.join(tree_label(c) for c in cs[leftpos:])
if leftpos + 2 >= len_cs:
return make_nonterminal(label, cs[leftpos:])
else:
return make_nonterminal(label, [cs[leftpos], right_binarise(cs, leftpos+1, len_cs)])
label = tree_label(tree)
if is_phrasal(tree):
cs = (prune(c, remove_empty, remove_partial, remove_punctuation, collapse_unary, binarise, relabel)
for c in tree_children(tree))
cs = [c for c in cs if c]
if cs or not remove_empty:
len_cs = len(cs)
if collapse_unary and len_cs == 1:
return make_nonterminal(relabel(label),
tree_children(cs[0]))
elif binarise and len_cs > 2:
if binarise=='right':
return make_nonterminal(relabel(label),
[cs[0], right_binarise(cs, 1, len_cs)])
else:
return make_nonterminal(relabel(label),
[left_binarise(cs, len_cs-1), cs[-1]])
else:
return make_nonterminal(relabel(label),
cs)
else:
return None
elif is_preterminal(tree):
if remove_empty and label in _empty_cats:
return None
if remove_partial and is_partial_word(tree):
return None
if remove_punctuation and is_punctuation(tree):
return None
return make_nonterminal(relabel(label), tree_children(tree))
else:
return tree
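# Typical use (sketch): drop empty elements and punctuation and strip
# coindexing before evaluation, e.g.
# cleaned = prune(tree, remove_empty=True, remove_punctuation=True,
#                 relabel=label_noindices)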
def tree_nodes(tree):
"""Yields all the nodes in tree."""
def visit(node):
yield node
if isinstance(node, list):
for child in node[1:]:
yield from visit(child)
yield from visit(tree)
def tree_terminals(tree):
"""Yields the terminal or leaf nodes of tree."""
def visit(node):
if isinstance(node, list):
for child in node[1:]:
yield from visit(child)
else:
yield node
yield from visit(tree)
def tree_preterminalnodes(tree):
"""Yields the preterminal nodes of tree."""
def visit(node):
if is_preterminal(node):
yield node
else:
for child in node[1:]:
yield from visit(child)
yield from visit(tree)
def tree_preterminallabels(tree):
"""Yields the labels of the preterminal nodes in tree."""
def visit(node):
if is_preterminal(node):
yield node[0]
else:
for child in node[1:]:
yield from visit(child)
yield from visit(tree)
def tree_phrasalnodes(tree):
"""Yields the phrasal (i.e., nonterminal and non-preterminal) nodes of tree"""
def visit(node):
if is_phrasal(node):
yield node
for child in node[1:]:
yield from visit(child)
yield from visit(tree)
Constituent = collections.namedtuple('Constituent', ('label', 'left', 'right'))
def tree_constituents(tree,
include_root=False,
include_terminals=False,
include_preterminals=False,
ignore_punctuation=True,
labelfn=tree_label):
"""Returns a list of Constituent tuples (label,left,right) for each
constituent in the tree, where left and right are integer string
positions, and label is obtained by applying labelfn to the tree
node.
If include_root==True, then the list of tuples includes a tuple
for the root node of the tree.
If include_terminals==True, then the list of tuples includes tuples
for the terminal nodes of the tree.
If include_preterminals==True, then the list of tuples includes tuples
for the preterminal nodes of the tree.
If ignore_punctuation==True, then the left and right positions ignore
punctuation.
"""
def visitor(node, left, constituents):
if ignore_punctuation and is_punctuation(node):
return left
if is_terminal(node):
if include_terminals:
constituents.append(Constituent(labelfn(node),left,left+1))
return left+1
else:
right = left
for child in tree_children(node):
right = visitor(child, right, constituents)
if include_preterminals or is_phrasal(node):
constituents.append(Constituent(labelfn(node),left,right))
return right
constituents = []
if include_root:
visitor(tree, 0, constituents)
else:
right = 0
for child in tree_children(tree):
right = visitor(child, right, constituents)
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `distributions.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax.tree_util import tree_map
import numpy as np
from rlax._src import distributions
class SoftmaxTest(parameterized.TestCase):
def setUp(self):
super(SoftmaxTest, self).setUp()
self.logits = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.samples = np.array([0, 1], dtype=np.int32)
self.expected_probs = np.array( # softmax with temperature=10
[[0.34425336, 0.34425336, 0.31149334],
[0.332225, 0.3671654, 0.3006096]],
dtype=np.float32)
probs = np.array( # softmax with temperature=1
[[0.42231882, 0.42231882, 0.15536241],
[0.24472848, 0.66524094, 0.09003057]],
dtype=np.float32)
logprobs = np.log(probs)
self.expected_logprobs = np.array(
[logprobs[0][self.samples[0]], logprobs[1][self.samples[1]]])
self.expected_entropy = -np.sum(probs * logprobs, axis=-1)
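# Sanity check for the constants above: expected_probs is softmax(logits / 10),
# e.g. exp([0.1, 0.1, 0.0]) ~= [1.10517, 1.10517, 1.0], which normalises to
# [0.34425, 0.34425, 0.31149]; expected_logprobs and expected_entropy use the
# temperature-1 probs, matching the default temperature of the distributions
# constructed in the logprob/entropy tests below.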
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_softmax_probs(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.softmax(temperature=10.)
# Optionally compile.
softmax = compile_fn(distrib.probs)
# For each element in the batch.
for logits, expected in zip(self.logits, self.expected_probs):
# Optionally convert to device array.
logits = place_fn(logits)
# Test outputs.
actual = softmax(logits)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_softmax_probs_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.softmax(temperature=10.)
# Vmap and optionally compile.
softmax = compile_fn(distrib.probs)
# Optionally convert to device array.
logits = place_fn(self.logits)
# Test softmax output in batch.
actual = softmax(logits)
np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_softmax_logprob(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.softmax()
# Optionally compile.
logprob_fn = compile_fn(distrib.logprob)
# For each element in the batch.
for logits, samples, expected in zip(
self.logits, self.samples, self.expected_logprobs):
# Optionally convert to device array.
logits, samples = tree_map(place_fn, (logits, samples))
# Test output.
actual = logprob_fn(samples, logits)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_softmax_logprob_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.softmax()
# Vmap and optionally compile.
logprob_fn = compile_fn(distrib.logprob)
# Optionally convert to device array.
logits, samples = tree_map(place_fn, (self.logits, self.samples))
# Test softmax output in batch.
actual = logprob_fn(samples, logits)
np.testing.assert_allclose(self.expected_logprobs, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_softmax_entropy(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.softmax()
# Optionally compile.
entropy_fn = compile_fn(distrib.entropy)
# For each element in the batch.
for logits, expected in zip(self.logits, self.expected_entropy):
# Optionally convert to device array.
logits = place_fn(logits)
# Test outputs.
actual = entropy_fn(logits)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_softmax_entropy_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.softmax()
# Vmap and optionally compile.
entropy_fn = compile_fn(distrib.entropy)
# Optionally convert to device array.
logits = place_fn(self.logits)
# Test softmax output in batch.
actual = entropy_fn(logits)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)
class GreedyTest(parameterized.TestCase):
def setUp(self):
super(GreedyTest, self).setUp()
self.preferences = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.samples = np.array([0, 1], dtype=np.int32)
self.expected_probs = np.array(
[[0.5, 0.5, 0.], [0., 1., 0.]], dtype=np.float32)
self.expected_logprob = np.array(
[-0.6931472, 0.], dtype=np.float32)
self.expected_entropy = np.array(
[0.6931472, 0.], dtype=np.float32)
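# The constants above follow directly from the greedy distribution:
# [1, 1, 0] has two tied maxima, so probs = [0.5, 0.5, 0], logprob(sample 0)
# = log(0.5) ~= -0.6931 and entropy = log(2); [1, 2, 0] is deterministic,
# so logprob(sample 1) = 0 and entropy = 0.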
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_probs(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.greedy()
# Optionally compile.
greedy = compile_fn(distrib.probs)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_probs):
# Optionally convert to device array.
preferences = place_fn(preferences)
# Test outputs.
actual = greedy(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_probs_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.greedy()
# Vmap and optionally compile.
greedy = compile_fn(distrib.probs)
# Optionally convert to device array.
preferences = place_fn(self.preferences)
# Test greedy output in batch.
actual = greedy(preferences)
np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_logprob(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.greedy()
# Optionally compile.
logprob_fn = compile_fn(distrib.logprob)
# For each element in the batch.
for preferences, samples, expected in zip(
self.preferences, self.samples, self.expected_logprob):
# Optionally convert to device array.
preferences, samples = tree_map(place_fn, (preferences, samples))
# Test output.
actual = logprob_fn(samples, preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_logprob_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.greedy()
# Vmap and optionally compile.
logprob_fn = compile_fn(distrib.logprob)
# Optionally convert to device array.
preferences, samples = tree_map(place_fn, (self.preferences, self.samples))
# Test greedy output in batch.
actual = logprob_fn(samples, preferences)
np.testing.assert_allclose(self.expected_logprob, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_entropy(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.greedy()
# Optionally compile.
entropy_fn = compile_fn(distrib.entropy)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_entropy):
# Optionally convert to device array.
preferences = place_fn(preferences)
# Test outputs.
actual = entropy_fn(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_entropy_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.greedy()
# Vmap and optionally compile.
entropy_fn = compile_fn(distrib.entropy)
# Optionally convert to device array.
preferences = place_fn(self.preferences)
# Test greedy output in batch.
actual = entropy_fn(preferences)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)
class EpsilonGreedyTest(parameterized.TestCase):
def setUp(self):
super(EpsilonGreedyTest, self).setUp()
self.epsilon = 0.2
self.preferences = np.array([[1, 1, 0, 0], [1, 2, 0, 0]], dtype=np.float32)
self.samples = np.array([0, 1], dtype=np.int32)
self.expected_probs = np.array(
[[0.45, 0.45, 0.05, 0.05], [0.05, 0.85, 0.05, 0.05]], dtype=np.float32)
self.expected_logprob = np.array(
[-0.7985077, -0.1625189], dtype=np.float32)
self.expected_entropy = np.array(
[1.01823008, 0.58750093], dtype=np.float32)
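# The constants above follow from probs = epsilon/n + (1 - epsilon) * greedy:
# with epsilon = 0.2 and n = 4 the uniform share is 0.05 per action;
# [1, 1, 0, 0] splits the remaining 0.8 across two tied maxima
# (0.05 + 0.4 = 0.45 each), while [1, 2, 0, 0] puts it all on the argmax
# (0.05 + 0.8 = 0.85); log(0.45) ~= -0.7985 and log(0.85) ~= -0.1625.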
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_probs(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.epsilon_greedy(self.epsilon)
# Optionally compile.
probs_fn = compile_fn(distrib.probs)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_probs):
# Optionally convert to device array.
preferences = place_fn(preferences)
# Test outputs.
actual = probs_fn(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_probs_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
distrib = distributions.epsilon_greedy(self.epsilon)
# Vmap and optionally compile.
probs_fn = compile_fn(distrib.probs)
# Optionally convert to device array.
preferences = place_fn(self.preferences)
# Test greedy output in batch.
actual = probs_fn(preferences)
np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_greedy_logprob(self, compile_fn, place_fn):
"""Tests for a single element."""
distrib = distributions.epsilon_greedy(self.epsilon)
# Optionally compile.
logprob_fn = compile_fn(distrib.logprob)
# For each element in the batch.
for preferences, samples, expected in zip(
self.preferences, self.samples, self.expected_logprob):
# Optionally convert to device array.
preferences, samples = tree_map(place_fn, (preferences, samples))
# Test output.
actual = logprob_fn(samples, preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
import os
import sys
import json
import copy
import time
import numpy as np
import pandas as pd
import logging
from supervised.model_framework import ModelFramework
from supervised.callbacks.early_stopping import EarlyStopping
from supervised.callbacks.metric_logger import MetricLogger
from supervised.callbacks.time_constraint import TimeConstraint
from supervised.utils.metric import Metric
from supervised.algorithms.registry import AlgorithmsRegistry
from supervised.algorithms.registry import (
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
REGRESSION,
)
from supervised.tuner.mljar_tuner import MljarTuner
from supervised.ensemble import Ensemble
from supervised.utils.additional_metrics import AdditionalMetrics
from supervised.utils.config import LOG_LEVEL
from supervised.preprocessing.exclude_missing_target import ExcludeRowsMissingTarget
logging.basicConfig(
format="%(asctime)s %(name)s %(levelname)s %(message)s", level=logging.ERROR
)
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
from supervised.exceptions import AutoMLException
import gc
from supervised.utils.config import mem
from tabulate import tabulate
class AutoML:
"""
Automated Machine Learning for supervised tasks (binary classification, multiclass classification, regression).
"""
def __init__(
self,
results_path=None,
total_time_limit=60 * 60,
model_time_limit=None,
algorithms=["Baseline", "Decision Tree", "Random Forest", "Xgboost"],
tuning_mode="Sport",
train_ensemble=True,
optimize_metric=None,
validation={"validation_type": "kfold", "k_folds": 5, "shuffle": True},
verbose=True,
ml_task=None,
seed=1,
):
"""
Create the AutoML object. Initializes the directory for results.
:param results_path: The path where all results will be saved.
If left `None`, the directory name will be generated using the schema AutoML_{number},
where number runs from 1 to 100, depending on which directory name is available.
If `results_path` points to a directory with existing AutoML results, then all models will be loaded.
:param total_time_limit: The time limit in seconds for AutoML training. It is not used when `model_time_limit` is set.
:param model_time_limit: The time limit in seconds for training a single model.
If `model_time_limit` is set, the `total_time_limit` is not respected.
A single model can contain several learners; for example, with 10-fold cross-validation one model will have 10 learners.
The time limit for a single learner is computed from `model_time_limit`.
:param algorithms: The list of algorithms that will be used in the training.
:param tuning_mode: The mode for tuning. It can be: `Normal`, `Sport`, `Insane`, `Perfect`. The names are kept the same as in the https://mljar.com application.
Each mode describes how many models will be checked:
- `Normal` - about 5-10 models of each algorithm will be trained,
- `Sport` - about 10-15 models of each algorithm will be trained,
- `Insane` - about 15-20 models of each algorithm will be trained,
- `Perfect` - about 25-35 models of each algorithm will be trained.
You can also set how many models will be trained with the `set_advanced` method.
:param train_ensemble: If True, an ensemble will be created at the end of models training.
:param optimize_metric: The metric to be optimized. (Not implemented yet; please leave it as `None`.)
:param validation: The JSON with the validation type. Right now only Cross-Validation is supported.
Example JSON parameters for validation:
```
{"validation_type": "kfold", "k_folds": 5, "shuffle": True, "stratify": True, "random_seed": 123}
```
:param verbose: Not implemented yet.
:param ml_task: The machine learning task to be solved. Can be: `"binary_classification"`, `"multiclass_classification"`, `"regression"`.
If left `None`, AutoML will try to guess the task from the target values.
If there are only 2 unique values in the target, the task is set to `"binary_classification"`.
If the number of unique values in the target is between 2 and 20 (inclusive), the task is set to `"multiclass_classification"`.
In all other cases, the task is set to `"regression"`.
:param seed: The seed for the random generator.
"""
logger.debug("AutoML.__init__")
# total_time_limit is the time budget for computing all models
# model_time_limit is the time budget for computing a single model
# if model_time_limit is None, its value is computed from total_time_limit
# if both total_time_limit and model_time_limit are set, the total_time_limit constraint is ignored
self._total_time_limit = total_time_limit
self._model_time_limit = model_time_limit
# time limit in seconds for a single learner (a model consists of learners)
# the actual value is computed before fit; initialize with a placeholder
self._time_limit = 1
self._train_ensemble = train_ensemble
self._models = [] # instances of iterative learner framework or ensemble
# an instance of ModelFramework or Ensemble
self._best_model = None
self._validation = validation
self.set_tuning_mode(tuning_mode)
self._algorithms = algorithms
self._verbose = verbose
self._fit_time = None
self._models_train_time = {}
self._threshold, self._metrics_details, self._max_metrics, self._confusion_matrix = (
None,
None,
None,
None,
)
self._seed = seed
self._user_set_optimize_metric = optimize_metric
self._ml_task = ml_task
self._X_train_path, self._y_train_path = None, None
self._X_validation_path, self._y_validation_path = None, None
self._data_info = None
self._model_paths = []
self._results_path = results_path
self._set_results_dir()
def set_tuning_mode(self, mode="Normal"):
if mode == "Sport":
self._start_random_models = 10
self._hill_climbing_steps = 2
self._top_models_to_improve = 3
elif mode == "Insane":
self._start_random_models = 15
self._hill_climbing_steps = 3
self._top_models_to_improve = 4
elif mode == "Perfect":
self._start_random_models = 25
self._hill_climbing_steps = 5
self._top_models_to_improve = 5
else: # Normal
self._start_random_models = 5
self._hill_climbing_steps = 1
self._top_models_to_improve = 2
self._tuner_params = {
"start_random_models": self._start_random_models,
"hill_climbing_steps": self._hill_climbing_steps,
"top_models_to_improve": self._top_models_to_improve,
}
def set_advanced(
self, start_random_models=1, hill_climbing_steps=0, top_models_to_improve=0
):
"""
Advanced set of tuning parameters.
:param start_random_models: Number of not-so-random models to check for each algorithm.
:param hill_climbing_steps: Number of hill climbing steps during tuning.
:param top_models_to_improve: Number of top models (of each algorithm) which will be considered for improving in hill climbing steps.
"""
self._start_random_models = start_random_models
self._hill_climbing_steps = hill_climbing_steps
self._top_models_to_improve = top_models_to_improve
self._tuner_params = {
"start_random_models": self._start_random_models,
"hill_climbing_steps": self._hill_climbing_steps,
"top_models_to_improve": self._top_models_to_improve,
}
def _set_results_dir(self):
if self._results_path is None:
found = False
for i in range(1, 101):
self._results_path = f"AutoML_{i}"
if not os.path.exists(self._results_path):
found = True
break
if not found:
raise AutoMLException("Cannot create directory for AutoML results")
if os.path.exists(self._results_path) and os.path.exists(
os.path.join(self._results_path, "params.json")
):
print(f"Directory {self._results_path} already exists")
self.load()
elif self._results_path is not None:
if not os.path.exists(self._results_path):
print(f"Create directory {self._results_path}")
try:
os.mkdir(self._results_path)
except Exception as e:
raise AutoMLException(
f"Cannot create directory {self._results_path}"
)
elif os.path.exists(self._results_path) and len(
os.listdir(self._results_path)
):
raise AutoMLException(
f"Cannot set directory for AutoML. Directory {self._results_path} is not empty."
)
else:
raise AutoMLException("Cannot set directory for AutoML results")
def load(self):
logger.info("Loading AutoML models ...")
try:
params = json.load(open(os.path.join(self._results_path, "params.json")))
self._model_paths = params["saved"]
self._ml_task = params["ml_task"]
self._optimize_metric = params["optimize_metric"]
models_map = {}
for model_path in self._model_paths:
if model_path.endswith("ensemble"):
ens = Ensemble.load(model_path, models_map)
models_map[ens.get_name()] = ens
else:
m = ModelFramework.load(model_path)
self._models += [m]
models_map[m.get_name()] = m
best_model_name = None
with open(os.path.join(self._results_path, "best_model.txt"), "r") as fin:
best_model_name = fin.read()
self._best_model = models_map[best_model_name]
data_info_path = os.path.join(self._results_path, "data_info.json")
self._data_info = json.load(open(data_info_path))
except Exception as e:
raise AutoMLException(f"Cannot load AutoML directory. {str(e)}")
def _estimate_training_times(self):
algo_cnt = len(self._algorithms)
if "Baseline" in self._algorithms:
algo_cnt -= 1
self._estimated_models_to_check = algo_cnt * self._start_random_models
if self._estimated_models_to_check > self._top_models_to_improve:
self._estimated_models_to_check += (
self._top_models_to_improve * self._hill_climbing_steps * 2
)
if "Baseline" in self._algorithms:
self._estimated_models_to_check += 1
if self._model_time_limit is not None:
k = self._validation.get("k_folds", 1.0)
self._time_limit = self._model_time_limit / k
elif self._total_time_limit is not None:
# set the time limit for training a single model
# 0.85 is a safety scale factor so the total time limit is not exceeded;
# scaling is needed because the number of models to train is only an estimate
k = self._validation.get("k_folds", 1.0)
self._time_limit = (
self._total_time_limit * 0.85 / self._estimated_models_to_check / k
)
print(
f"AutoML will try to check about {int(self._estimated_models_to_check)} model{'s' if int(self._estimated_models_to_check)>1 else ''}"
)
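# Worked example of the estimate above, with the default algorithm list and
# "Sport" mode: 3 non-Baseline algorithms * 10 start_random_models = 30,
# plus 3 top_models_to_improve * 2 hill_climbing_steps * 2 = 12, plus 1 for
# Baseline = 43 models; with 5 folds each learner then gets roughly
# total_time_limit * 0.85 / 43 / 5 seconds.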
def get_leaderboard(self):
ldb = {
"name": [],
"model_type": [],
"metric_type": [],
"metric_value": [],
"train_time": [],
}
for m in self._models:
ldb["name"] += [m.get_name()]
ldb["model_type"] += [m.get_type()]
ldb["metric_type"] += [self._optimize_metric]
ldb["metric_value"] += [m.get_final_loss()]
ldb["train_time"] += [np.round(m.get_train_time(), 2)]
return pd.DataFrame(ldb)
def get_additional_metrics(self):
additional_metrics = self._best_model.get_additional_metrics()
# AdditionalMetrics.compute(
# oof_predictions[target_cols],
# oof_predictions[prediction_cols],
# self._ml_task,
# )
if self._ml_task == BINARY_CLASSIFICATION:
self._metrics_details = additional_metrics["metric_details"]
self._max_metrics = additional_metrics["max_metrics"]
self._confusion_matrix = additional_metrics["confusion_matrix"]
self._threshold = additional_metrics["threshold"]
logger.info(
"Metric details:\n{}\n\nConfusion matrix:\n{}".format(
self._max_metrics.transpose(), self._confusion_matrix
)
)
with open(
os.path.join(self._results_path, "best_model_metrics.txt"), "w"
) as fout:
fout.write(
"Metric details:\n{}\n\nConfusion matrix:\n{}".format(
self._max_metrics.transpose(), self._confusion_matrix
)
)
elif self._ml_task == MULTICLASS_CLASSIFICATION:
max_metrics = additional_metrics["max_metrics"]
confusion_matrix = additional_metrics["confusion_matrix"]
logger.info(
"Metric details:\n{}\nConfusion matrix:\n{}".format(
max_metrics, confusion_matrix
)
)
with open(
os.path.join(self._results_path, "best_model_metrics.txt"), "w"
) as fout:
fout.write("Metric details:\n{}\n\n".format(max_metrics.transpose()))
fout.write("Confusion matrix:\n{}".format(confusion_matrix))
def keep_model(self, model):
if model is None:
return
self._models += [model]
self.verbose_print(
"{} final {} {} time {} seconds".format(
model.get_type(),
self._optimize_metric,
model.get_final_loss(),
np.round(model.get_train_time(), 2),
)
)
self.log_train_time(model.get_type(), model.get_train_time())
def train_model(self, params):
model_path = os.path.join(self._results_path, params["name"])
early_stop = EarlyStopping(
{"metric": {"name": self._optimize_metric}, "log_to_dir": model_path}
)
time_constraint = TimeConstraint({"train_seconds_time_limit": self._time_limit})
Tlist = numpy.arange(original_kinetics.Tmin, original_kinetics.Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientPDepArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the PDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius
arrhenius0 = Arrhenius(
A = (1.0e6,"s^-1"),
n = 1.0,
Ea = (10.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
arrhenius1 = Arrhenius(
A = (1.0e12,"s^-1"),
n = 1.0,
Ea = (20.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
pressures = numpy.array([0.1, 10.0])
arrhenius = [arrhenius0, arrhenius1]
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
original_kinetics = PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
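# These round-trip checks rest on detailed balance: the reverse rate is
# obtained as k_r(T) = k_f(T) / Keq(T), so reversing twice should recover the
# original k(T) up to refitting error, which is why the assertions only
# require korig / krevrev ~= 1.0 to 0 decimal places.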
def testGenerateReverseRateCoefficientMultiArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the MultiArrhenius format.
"""
from rmgpy.kinetics import MultiArrhenius
pressures = numpy.array([0.1, 10.0])
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
arrhenius = [
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
]
original_kinetics = MultiArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiPDepArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the MultiPDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius
Tmin = 350.
Tmax = 1500.
Pmin = 1e-1
Pmax = 1e1
pressures = numpy.array([1e-1,1e1])
comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)'
arrhenius = [
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (9.3e-16,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (1.4e-11,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
]
original_kinetics = MultiPDepArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientThirdBody(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the ThirdBody format.
"""
from rmgpy.kinetics import ThirdBody
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
thirdBody = ThirdBody(
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = thirdBody
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientLindemann(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the Lindemann format.
"""
from rmgpy.kinetics import Lindemann
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
lindemann = Lindemann(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = lindemann
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientTroe(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the Troe format.
"""
from rmgpy.kinetics import Troe
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
alpha = 0.783
T3 = 74
T1 = 2941
T2 = 6964
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
troe = Troe(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
alpha = alpha,
T3 = (T3,"K"),
T1 = (T1,"K"),
T2 = (T2,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = troe
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testTSTCalculation(self):
"""
A test of the transition state theory k(T) calculation function,
using the reaction H + C2H4 -> C2H5.
"""
Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.01)
klist = numpy.array([self.reaction.calculateTSTRateCoefficient(T) for T in Tlist])
arrhenius = Arrhenius().fitToData(Tlist, klist, kunits='m^3/(mol*s)')
klist2 = numpy.array([arrhenius.getRateCoefficient(T) for T in Tlist])
if 'team' in params:
form_params.append(('team', params['team'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
if 'is_admin' in params:
form_params.append(('is_admin', params['is_admin'])) # noqa: E501
if 'team' in params:
form_params.append(('team', params['team'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/team_membership/{id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamMembership', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def partial_update_team_membership(self, id, **kwargs): # noqa: E501
"""partial_update_team_membership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_team_membership(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool is_admin:
:param int team:
:param int user:
:param str id2: id
:param str is_admin2: is_admin
:param str team2: team
:param str user2: user
:return: TeamMembership
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.partial_update_team_membership_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.partial_update_team_membership_with_http_info(id, **kwargs) # noqa: E501
return data
def partial_update_team_membership_with_http_info(self, id, **kwargs): # noqa: E501
"""partial_update_team_membership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_team_membership_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool is_admin:
:param int team:
:param int user:
:param str id2: id
:param str is_admin2: is_admin
:param str team2: team
:param str user2: user
:return: TeamMembership
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'is_admin', 'team', 'user', 'id2', 'is_admin2', 'team2', 'user2'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method partial_update_team_membership" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `partial_update_team_membership`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'is_admin2' in params:
query_params.append(('is_admin', params['is_admin2'])) # noqa: E501
if 'team2' in params:
query_params.append(('team', params['team2'])) # noqa: E501
if 'user2' in params:
query_params.append(('user', params['user2'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'is_admin' in params:
form_params.append(('is_admin', params['is_admin'])) # noqa: E501
if 'team' in params:
form_params.append(('team', params['team'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
if 'is_admin' in params:
form_params.append(('is_admin', params['is_admin'])) # noqa: E501
if 'team' in params:
form_params.append(('team', params['team'])) # noqa: E501
if 'user' in params:
form_params.append(('user', params['user'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/team_membership/{id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamMembership', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_team_membership(self, id, **kwargs): # noqa: E501
"""retrieve_team_membership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_team_membership(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str is_admin: is_admin
:param str team: team
:param str user: user
:return: TeamMembership
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_team_membership_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.retrieve_team_membership_with_http_info(id, **kwargs) # noqa: E501
return data
def retrieve_team_membership_with_http_info(self, id, **kwargs): # noqa: E501
"""retrieve_team_membership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_team_membership_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str is_admin: is_admin
:param str team: team
:param str user: user
:return: TeamMembership
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'is_admin', 'team', 'user'] # noqa: E501
all_params.append('omit')
all_params.append('fields')
all_params.append('expand')
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_team_membership" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `retrieve_team_membership`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'is_admin' in params:
query_params.append(('is_admin', params['is_admin'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
if 'user' in params:
query_params.append(('user', params['user'])) # noqa: E501
if 'omit' in params:
query_params.append(('omit', params['omit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'expand' in params:
query_params.append(('expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/team_membership/{id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamMembership', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
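# Usage sketch (mirrors the docstring examples above; assumes an instantiated
# client `api` with HTTP basic auth configured):
#   membership = api.retrieve_team_membership('42')              # synchronous
#   thread = api.retrieve_team_membership('42', async_req=True)  # asynchronous
#   membership = thread.get()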
def update_team_membership(self, id, **kwargs): # noqa: E501
"""update_team_membership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_team_membership(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body21 body:
:param str id2: id
:param str is_admin2: is_admin
:param str team2: team
:param str user2: user
:return: TeamMembership
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_team_membership_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_team_membership_with_http_info(id, **kwargs) # noqa: E501
return data
def update_team_membership_with_http_info(self, id, **kwargs): # noqa: E501
"""update_team_membership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_team_membership_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body21 body:
:param str id2: id
:param str is_admin2: is_admin
:param str team2: team
:param str user2: user
:return: TeamMembership
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body', 'id2', 'is_admin2', 'team2', 'user2'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_team_membership" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_team_membership`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'is_admin2' in params:
query_params.append(('is_admin', params['is_admin2'])) # noqa: E501
if 'team2' in params:
query_params.append(('team', params['team2'])) # noqa: E501
if 'user2' in params:
query_params.append(('user', params['user2'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'is_admin' in params:
form_params.append(('is_admin', params['is_admin'])) # noqa: E501
if 'team' in params:
form_params.append(('team', params['team'])) # noqa: E501
valid_index = tag_chunk('valid', label, chunk, chunk_periods, valid_period, log_file, with_index, valid_index, hdf=hdf[1])
test_index = tag_chunk('test', label, chunk, chunk_periods, test_period, log_file, with_index, test_index, hdf=hdf[2])
inter_periods = list(chunk_periods.intersection(set(range(test_period[1]+1,355))))
log_file.write('Periods greater than test_period: %s\r\n' % str(inter_periods))
p_chunk = chunk.loc[(slice(None), inter_periods), :]
log_file.write('Records greater than test_period - Number of rows: %d\r\n' % (p_chunk.shape[0]))
del chunk
i += 1
return train_index, valid_index, test_index
def allfeatures_prepro_file(RAW_DIR, file_path, raw_dir, file_name, target_path, train_period, valid_period, test_period, log_file, dividing='percentage', chunksize=500000,
refNorm=True, label='DELINQUENCY_STATUS_NEXT', with_index=True, output_hdf=True):
descriptive_cols = [
'LOAN_ID',
'ASOFMONTH',
'PERIOD_NEXT',
'MOD_PER_FROM',
'MOD_PER_TO',
'PROPERTY_ZIP',
'INVALID_TRANSITIONS'
]
numeric_cols = ['MBA_DAYS_DELINQUENT', 'MBA_DAYS_DELINQUENT_NAN',
'CURRENT_INTEREST_RATE', 'CURRENT_INTEREST_RATE_NAN', 'LOANAGE', 'LOANAGE_NAN',
'CURRENT_BALANCE', 'CURRENT_BALANCE_NAN', 'SCHEDULED_PRINCIPAL',
'SCHEDULED_PRINCIPAL_NAN', 'SCHEDULED_MONTHLY_PANDI',
'SCHEDULED_MONTHLY_PANDI_NAN',
'LLMA2_CURRENT_INTEREST_SPREAD', 'LLMA2_CURRENT_INTEREST_SPREAD_NAN',
'LLMA2_C_IN_LAST_12_MONTHS',
'LLMA2_30_IN_LAST_12_MONTHS', 'LLMA2_60_IN_LAST_12_MONTHS',
'LLMA2_90_IN_LAST_12_MONTHS', 'LLMA2_FC_IN_LAST_12_MONTHS',
'LLMA2_REO_IN_LAST_12_MONTHS', 'LLMA2_0_IN_LAST_12_MONTHS',
'LLMA2_HIST_LAST_12_MONTHS_MIS',
'NUM_MODIF', 'NUM_MODIF_NAN', 'P_RATE_TO_MOD', 'P_RATE_TO_MOD_NAN', 'MOD_RATE',
'MOD_RATE_NAN', 'DIF_RATE', 'DIF_RATE_NAN', 'P_MONTHLY_PAY',
'P_MONTHLY_PAY_NAN', 'MOD_MONTHLY_PAY', 'MOD_MONTHLY_PAY_NAN',
'DIF_MONTHLY_PAY', 'DIF_MONTHLY_PAY_NAN', 'CAPITALIZATION_AMT',
'CAPITALIZATION_AMT_NAN', 'MORTGAGE_RATE', 'MORTGAGE_RATE_NAN',
'FICO_SCORE_ORIGINATION', 'INITIAL_INTEREST_RATE', 'ORIGINAL_LTV',
'ORIGINAL_BALANCE', 'BACKEND_RATIO', 'BACKEND_RATIO_NAN',
'ORIGINAL_TERM', 'ORIGINAL_TERM_NAN', 'SALE_PRICE', 'SALE_PRICE_NAN',
'PREPAY_PENALTY_TERM', 'PREPAY_PENALTY_TERM_NAN',
'NUMBER_OF_UNITS', 'NUMBER_OF_UNITS_NAN', 'MARGIN',
'MARGIN_NAN', 'PERIODIC_RATE_CAP', 'PERIODIC_RATE_CAP_NAN',
'PERIODIC_RATE_FLOOR', 'PERIODIC_RATE_FLOOR_NAN', 'LIFETIME_RATE_CAP',
'LIFETIME_RATE_CAP_NAN', 'LIFETIME_RATE_FLOOR',
'LIFETIME_RATE_FLOOR_NAN', 'RATE_RESET_FREQUENCY',
'RATE_RESET_FREQUENCY_NAN', 'PAY_RESET_FREQUENCY',
'PAY_RESET_FREQUENCY_NAN', 'FIRST_RATE_RESET_PERIOD',
'FIRST_RATE_RESET_PERIOD_NAN',
'LLMA2_PRIME',
'LLMA2_SUBPRIME', 'LLMA2_APPVAL_LT_SALEPRICE', 'LLMA2_ORIG_RATE_SPREAD',
'LLMA2_ORIG_RATE_SPREAD_NAN', 'AGI', 'AGI_NAN', 'UR', 'UR_NAN', 'LLMA2_ORIG_RATE_ORIG_MR_SPREAD',
'LLMA2_ORIG_RATE_ORIG_MR_SPREAD_NAN', 'COUNT_INT_RATE_LESS', 'NUM_PRIME_ZIP', 'NUM_PRIME_ZIP_NAN'
]
# nan_cols = {'MBA_DAYS_DELINQUENT': 0, 'CURRENT_INTEREST_RATE': 0, 'LOANAGE': 0,
# 'CURRENT_BALANCE' : 0, 'SCHEDULED_PRINCIPAL': 0, 'SCHEDULED_MONTHLY_PANDI': 0,
# 'LLMA2_CURRENT_INTEREST_SPREAD': 0, 'NUM_MODIF': 0, 'P_RATE_TO_MOD': 0, 'MOD_RATE': 0,
# 'DIF_RATE': 0, 'P_MONTHLY_PAY': 0, 'MOD_MONTHLY_PAY': 0, 'DIF_MONTHLY_PAY': 0, 'CAPITALIZATION_AMT': 0,
# 'MORTGAGE_RATE': 0, 'FICO_SCORE_ORIGINATION': 0, 'INITIAL_INTEREST_RATE': 0, 'ORIGINAL_LTV': 0,
# 'ORIGINAL_BALANCE': 0, 'BACKEND_RATIO': 0, 'ORIGINAL_TERM': 0, 'SALE_PRICE': 0, 'PREPAY_PENALTY_TERM': 0,
# 'NUMBER_OF_UNITS': 0, 'MARGIN': 0, 'PERIODIC_RATE_CAP': 0, 'PERIODIC_RATE_FLOOR': 0, 'LIFETIME_RATE_CAP': 0,
# 'LIFETIME_RATE_FLOOR': 0, 'RATE_RESET_FREQUENCY': 0, 'PAY_RESET_FREQUENCY': 0,
# 'FIRST_RATE_RESET_PERIOD': 0, 'LLMA2_ORIG_RATE_SPREAD': 0, 'AGI': 0, 'UR': 0,
# 'LLMA2_C_IN_LAST_12_MONTHS': 0, 'LLMA2_30_IN_LAST_12_MONTHS': 0, 'LLMA2_60_IN_LAST_12_MONTHS': 0,
# 'LLMA2_90_IN_LAST_12_MONTHS': 0, 'LLMA2_FC_IN_LAST_12_MONTHS': 0,
# 'LLMA2_REO_IN_LAST_12_MONTHS': 0, 'LLMA2_0_IN_LAST_12_MONTHS': 0}
nan_cols = {'MBA_DAYS_DELINQUENT': 'median', 'CURRENT_INTEREST_RATE': 'median', 'LOANAGE': 'median',
'CURRENT_BALANCE' : 'median', 'SCHEDULED_PRINCIPAL': 'median', 'SCHEDULED_MONTHLY_PANDI': 'median',
'LLMA2_CURRENT_INTEREST_SPREAD': 'median', 'NUM_MODIF': 0, 'P_RATE_TO_MOD': 0, 'MOD_RATE': 0,
'DIF_RATE': 0, 'P_MONTHLY_PAY': 0, 'MOD_MONTHLY_PAY': 0, 'DIF_MONTHLY_PAY': 0, 'CAPITALIZATION_AMT': 0,
'MORTGAGE_RATE': 'median', 'FICO_SCORE_ORIGINATION': 'median', 'INITIAL_INTEREST_RATE': 'median', 'ORIGINAL_LTV': 'median',
'ORIGINAL_BALANCE': 'median', 'BACKEND_RATIO': 'median', 'ORIGINAL_TERM': 'median', 'SALE_PRICE': 'median', 'PREPAY_PENALTY_TERM': 'median',
'NUMBER_OF_UNITS': 'median', 'MARGIN': 'median', 'PERIODIC_RATE_CAP': 'median', 'PERIODIC_RATE_FLOOR': 'median', 'LIFETIME_RATE_CAP': 'median',
'LIFETIME_RATE_FLOOR': 'median', 'RATE_RESET_FREQUENCY': 'median', 'PAY_RESET_FREQUENCY': 'median',
'FIRST_RATE_RESET_PERIOD': 'median', 'LLMA2_ORIG_RATE_SPREAD': 'median', 'AGI': 'median', 'UR': 'median',
'LLMA2_C_IN_LAST_12_MONTHS': 'median', 'LLMA2_30_IN_LAST_12_MONTHS': 'median', 'LLMA2_60_IN_LAST_12_MONTHS': 'median',
'LLMA2_90_IN_LAST_12_MONTHS': 'median', 'LLMA2_FC_IN_LAST_12_MONTHS': 'median',
'LLMA2_REO_IN_LAST_12_MONTHS': 'median', 'LLMA2_0_IN_LAST_12_MONTHS': 'median',
'LLMA2_ORIG_RATE_ORIG_MR_SPREAD':0, 'COUNT_INT_RATE_LESS' :'median', 'NUM_PRIME_ZIP':'median'
}
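# Sketch of how a {column: strategy} map like nan_cols could be applied with
# pandas (hypothetical illustration; the actual imputation is performed inside
# prepro_chunk):
#   for col, strategy in nan_cols.items():
#       fill = chunk[col].median() if strategy == 'median' else strategy
#       chunk[col] = chunk[col].fillna(fill)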
categorical_cols = {'MBA_DELINQUENCY_STATUS': ['0','3','6','9','C','F','R'], 'DELINQUENCY_STATUS_NEXT': ['0','3','6','9','C','F','R'], #,'S','T','X'
'BUYDOWN_FLAG': ['N','U','Y'], 'NEGATIVE_AMORTIZATION_FLAG': ['N','U','Y'], 'PREPAY_PENALTY_FLAG': ['N','U','Y'],
'OCCUPANCY_TYPE': ['1','2','3','U'], 'PRODUCT_TYPE': ['10','20','30','40','50','51','52','53','54','5A','5Z',
'60','61','62','63','6Z','70','80','81','82','83','84','8Z','U'],
'PROPERTY_TYPE': ['1','2','3','4','5','6','7','8','9','M','U','Z'], 'LOAN_PURPOSE_CATEGORY': ['P','R','U'],
'DOCUMENTATION_TYPE': ['1','2','3','U'], 'CHANNEL': ['1','2','3','4','5','6','7','8','9','A','B','C','D','U'],
'LOAN_TYPE': ['1','2','3','4','5','6','U'], 'IO_FLAG': ['N','U','Y'],
'CONVERTIBLE_FLAG': ['N','U','Y'], 'POOL_INSURANCE_FLAG': ['N','U','Y'], 'STATE': ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO',
'CT', 'DC', 'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA',
'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV',
'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VT',
'WA', 'WI', 'WV', 'WY'],
'CURRENT_INVESTOR_CODE': ['240', '250', '253', 'U'], 'ORIGINATION_YEAR': ['B1995','1995','1996','1997','1998','1999','2000','2001','2002','2003',
'2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018']}
time_cols = ['YEAR', 'MONTH'] #, 'PERIOD'] #no nan values
total_cols = numeric_cols.copy()
total_cols.extend(descriptive_cols)
total_cols.extend(categorical_cols.keys())
total_cols.extend(time_cols)
print('raw total_cols size: ', len(total_cols)) #110 !=112?? set(chunk_cols) - set(total_cols): {'LOAN_ID', 'PERIOD'}
pd.set_option('io.hdf.default_format','table')
dist_file = pd.read_csv(os.path.join(RAW_DIR, "percentile features3-test.csv"), sep=';', low_memory=False)
dist_file.columns = dist_file.columns.str.upper()
ncols = [x for x in numeric_cols if x.find('NAN')<0]
robust_cols, robust_normalizer = custom_robust_normalizer(ncols, dist_file, center_value='quantile', normalizer_type='percentile_scaler')
minmax_cols, minmax_normalizer = custom_minmax_normalizer(ncols, robust_normalizer.scale_, dist_file)
inters = set(robust_cols).intersection(minmax_cols)
to_delete = [i for x,i in zip(minmax_cols,range(len(minmax_cols))) if x in inters]
minmax_normalizer.scale_ = np.delete(minmax_normalizer.scale_,to_delete, 0)
minmax_normalizer.center_ = np.delete(minmax_normalizer.center_,to_delete, 0)
minmax_cols = np.delete(minmax_cols,to_delete, 0)
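# For reference (a sketch, not the exact implementation of the helpers above):
# percentile-based robust scaling maps x -> (x - center) / (p_hi - p_lo), with
# the center and percentiles read from the precomputed distribution file rather
# than from each chunk; the min-max scaler covers the remaining columns, which
# is why its overlap with robust_cols was just deleted.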
if (output_hdf == True):
#with pd.HDFStore(target_path +'-pp.h5', complib='lzo', complevel=9) as hdf: #complib='lzo', complevel=9
train_writer = pd.HDFStore(target_path +'-train-pp.h5', complib='lzo', complevel=9)
valid_writer = pd.HDFStore(target_path +'-valid-pp.h5', complib='lzo', complevel=9)
test_writer = pd.HDFStore(target_path +'-test-pp.h5', complib='lzo', complevel=9)
print('generating: ', target_path +'-pp.h5')
train_index, valid_index, test_index = prepro_chunk(file_name, file_path, chunksize, label, log_file, nan_cols, categorical_cols, descriptive_cols,
time_cols, robust_cols, minmax_cols, robust_normalizer, minmax_normalizer, dist_file, with_index,
refNorm, train_period, valid_period, test_period, hdf=[train_writer, valid_writer, test_writer], tfrec=None)
print(train_index, valid_index, test_index)
if train_writer.get_storer('train/features').nrows != train_writer.get_storer('train/labels').nrows:
raise ValueError('Train-DataSet: Sizes should match!')
if valid_writer.get_storer('valid/features').nrows != valid_writer.get_storer('valid/labels').nrows:
raise ValueError('Valid-DataSet: Sizes should match!')
if test_writer.get_storer('test/features').nrows != test_writer.get_storer('test/labels').nrows:
raise ValueError('Test-DataSet: Sizes should match!')
print('train/features size: ', train_writer.get_storer('train/features').nrows)
print('valid/features size: ', valid_writer.get_storer('valid/features').nrows)
print('test/features size: ', test_writer.get_storer('test/features').nrows)
log_file.write('***SUMMARY***\n')
log_file.write('train/features size: %d\r\n' %(train_writer.get_storer('train/features').nrows))
log_file.write('valid/features size: %d\r\n' %(valid_writer.get_storer('valid/features').nrows))
log_file.write('test/features size: %d\r\n' %(test_writer.get_storer('test/features').nrows))
logger.info('training, validation and testing set into .h5 file')
else:
train_writer = tf.python_io.TFRecordWriter(target_path +'-train-pp.tfrecords')
valid_writer = tf.python_io.TFRecordWriter(target_path +'-valid-pp.tfrecords')
test_writer = tf.python_io.TFRecordWriter(target_path +'-test-pp.tfrecords')
train_index, valid_index, test_index = prepro_chunk(file_name, file_path, chunksize, label, log_file, nan_cols, categorical_cols, descriptive_cols, time_cols,
robust_cols, minmax_cols, robust_normalizer, minmax_normalizer, dist_file, with_index, refNorm, train_period,
valid_period, test_period, hdf=None, tfrec=[train_writer, valid_writer, test_writer])
print(train_index, valid_index, test_index)
train_writer.close()
valid_writer.close()
test_writer.close()
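# The table-format .h5 outputs written above can be read back in slices, e.g.
# (a sketch): pd.read_hdf(target_path + '-train-pp.h5', 'train/features',
# start=0, stop=100000), and likewise for 'train/labels'.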
def get_other_set_slice(prep_dir, init_period, end_period, set_dir, file_name, chunk_size=8000000):
pd.set_option('io.hdf.default_format','table')
try:
chunk_ind = 0
target_path = os.path.join(PRO_DIR, set_dir,file_name+'_{:d}.h5'.format(chunk_ind))
hdf_target = pd.HDFStore(target_path)
print('Target Path: ', target_path)
total_rows = 0
for file_path in glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5")):
file_name = os.path.basename(file_path)
with pd.HDFStore(file_path) as hdf_input:
# hdf_input.get['features'].
# temp_features = pd.read_hdf(self.h5_path, self.dtype + '/features', start=self._global_index, stop=self._global_index + batch_size)
# df = hdf_input.select('features', [ Term('index', '>', Timestamp('20010105') ])
period_range = set(range(init_period, end_period+1))
period_features = set(hdf_input['features'].index.get_level_values(2))
period_inter = sorted(period_features.intersection(period_range)) # sorted list: a set cannot be indexed with [-1] below
for i in period_inter:
df_features = hdf_input['features'].loc[(slice(None), slice(None), i), :]
df_labels = hdf_input['labels'].loc[(slice(None), slice(None), i), :]
hdf_target.put('features', df_features, append=True)
hdf_target.put('labels', df_labels, append=True)
hdf_target.flush()
total_rows += df_features.shape[0]
num_columns = len(df_features.columns.values)
del df_features
del df_labels
if (total_rows >= chunk_size or i==period_inter[-1]):
if hdf_target.get_storer('features').nrows != hdf_target.get_storer('labels').nrows:
raise ValueError('DataSet: Sizes should match!')
hdf_target.get_storer('features').attrs.num_columns = num_columns
hdf_target.close()
total_rows = 0
chunk_ind += 1
if (i!=period_inter[-1]):
target_path = os.path.join(PRO_DIR, set_dir,file_name+'_{:d}.h5'.format(chunk_ind))
hdf_target = pd.HDFStore(target_path)
print('Target Path: ', target_path)
if hdf_target.is_open: hdf_target.close()
except Exception as e:
hdf_target.close()
print(e)
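# Hypothetical usage sketch for the slicer above; the directory names and the
# period range are made-up examples, not values from the original project.
# get_other_set_slice('preprocessed', init_period=200001, end_period=200512,
#                     set_dir='trainset', file_name='train')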
def get_other_set(prep_dir, init_period, end_period, set_dir, chunk_size=8000000):
pd.set_option('io.hdf.default_format','table')
try:
chunk_ind = 0
for file_path in glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5")):
file_name = os.path.basename(file_path)
print(file_name)
with pd.HDFStore(file_path) as hdf_input:
file_index = 0
for df_features in hdf_input.select('features', "PERIOD>=" + str(init_period) + ' & PERIOD<=' + str(end_period), chunksize = chunk_size):
try:
target_path = os.path.join(PRO_DIR, set_dir,file_name[:-4]+'_{:d}.h5'.format(chunk_ind))
hdf_target = pd.HDFStore(target_path)
print('Target Path: ', target_path)
if file_index + chunk_size <= hdf_input.get_storer('features').nrows:
df_labels = hdf_input.select('labels', "PERIOD>=" + str(init_period) + ' & PERIOD<=' + str(end_period), start = file_index, stop = file_index + chunk_size)
file_index += chunk_size
else:
df_labels = hdf_input.select('labels', "PERIOD>=" + str(init_period) + ' & PERIOD<=' + str(end_period), start = file_index)
file_index = 0
hdf_target.put('features', df_features, append=True)
hdf_target.put('labels', df_labels, append=True)
hdf_target.flush()
num_columns = len(df_features.columns.values)
hdf_target.get_storer('features').attrs.num_columns = num_columns
if hdf_target.get_storer('features').nrows != hdf_target.get_storer('labels').nrows:
raise ValueError('DataSet: Sizes should match!')
hdf_target.close()
del df_labels
del df_features
chunk_ind += 1
except Exception as e:
print(e)
if hdf_target.is_open: hdf_target.close()
except Exception as e:
print(e)
def slice_fixed_sets(prep_dir, set_dir, tag, chunk_size=400000):
pd.set_option('io.hdf.default_format','fixed') #'table')
try:
chunk_ind = 0
for file_path in glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5")):
file_name = os.path.basename(file_path)
print(file_name)
with pd.HDFStore(file_path) as hdf_input:
file_index = 0
for df_features in hdf_input.select(tag + '/features', chunksize = chunk_size):
try:
target_path = os.path.join(PRO_DIR, set_dir,file_name[:-4]+'_{:d}.h5'.format(chunk_ind))
hdf_target = pd.HDFStore(target_path, complib='lzo', complevel=9, chunkshape='auto')
print('Target Path: ', target_path)
df_labels = hdf_input.select(tag + '/labels', start = file_index, stop = file_index + df_features.shape[0])
# df_labels = df_labels.reset_index(level='index', drop=True)
# df_labels.set_index('index', range(0, chunk_size), append=True, inplace=True)
df_features.index = pd.MultiIndex.from_tuples([(i, x[1], x[2],x[3]) for x,i in zip(df_features.index, range(0, df_features.shape[0]))])
df_labels.index = pd.MultiIndex.from_tuples([(i, x[1], x[2],x[3]) for x,i in zip(df_labels.index, range(0, df_labels.shape[0]))])
file_index += df_features.shape[0]
hdf_target.put(tag + '/features', df_features)
hdf_target.put(tag + '/labels', df_labels)
hdf_target.flush()
if hdf_target.get_storer(tag+'/features').shape[0] != hdf_target.get_storer(tag + '/labels').shape[0]:
raise ValueError('DataSet: Sizes should match!')
hdf_target.close()
del df_labels
del df_features
chunk_ind += 1
except Exception as e:
if hdf_target.is_open: hdf_target.close()
except Exception as e:
print(e)
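# Hypothetical usage sketch: re-chunk the table-format stores of the training split
# into fixed-format stores (directory and tag names are made-up examples).
# slice_fixed_sets('prep-train', 'train-fixed', tag='train', chunk_size=400000)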
def slice_table_sets(prep_dir, set_dir, tag, target_name, input_chunk_size=1200, target_size = 70000, with_index=True, index=0):
'''The input directory must not be the same as the output directory, because the .h5 output files can be confused with the input files. '''
pd.set_option('io.hdf.default_format', 'table')
all_files
a parameter"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("PARAM:",par)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_parameter(par.name, par.type)
self.shared.function_params += 1
return index
def constant_action(self, text, loc, const):
"""Code executed after recognising a constant"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("CONST:",const)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
return self.symtab.insert_constant(const[0], const[1])
def function_begin_action(self, text, loc, fun):
"""Code executed after recognising a function definition (type and function name)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_BEGIN:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.shared.function_index = self.symtab.insert_function(fun.name, fun.type)
self.shared.function_name = fun.name
self.shared.function_params = 0
self.shared.function_vars = 0
self.codegen.function_begin();
def function_body_action(self, text, loc, fun):
"""Code executed after recognising the beginning of function's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_BODY:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.function_body()
def function_end_action(self, text, loc, fun):
"""Code executed at the end of function definition"""
if DEBUG > 0:
print("FUN_END:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#set function's attribute to number of function parameters
self.symtab.set_attribute(self.shared.function_index, self.shared.function_params)
#clear local function symbols (but leave function name)
self.symtab.clear_symbols(self.shared.function_index + 1)
self.codegen.function_end()
def return_action(self, text, loc, ret):
"""Code executed after recognising a return statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("RETURN:",ret)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
if not self.symtab.same_types(self.shared.function_index, ret.exp[0]):
raise SemanticException("Incompatible type in return")
#set register for function's return value to expression value
reg = self.codegen.take_function_register()
self.codegen.move(ret.exp[0], reg)
#after return statement, register for function's return value is available again
self.codegen.free_register(reg)
#jump to function's exit
self.codegen.unconditional_jump(self.codegen.label(self.shared.function_name+"_exit", True))
def lookup_id_action(self, text, loc, var):
"""Code executed after recognising an identificator in expression"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("EXP_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
var_index = self.symtab.lookup_symbol(var.name, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
if var_index is None:
raise SemanticException("'%s' undefined" % var.name)
return var_index
def assignment_action(self, text, loc, assign):
"""Code executed after recognising an assignment statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("ASSIGN:",assign)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
var_index = self.symtab.lookup_symbol(assign.var, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
if var_index is None:
raise SemanticException("Undefined lvalue '%s' in assignment" % assign.var)
if not self.symtab.same_types(var_index, assign.exp[0]):
raise SemanticException("Incompatible types in assignment")
self.codegen.move(assign.exp[0], var_index)
def mulexp_action(self, text, loc, mul):
"""Code executed after recognising a mulexp expression (something *|/ something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("MUL_EXP:",mul)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#iterate through all multiplications/divisions
m = list(mul)
while len(m) > 1:
if not self.symtab.same_types(m[0], m[2]):
raise SemanticException("Invalid opernads to binary '%s'" % m[1])
reg = self.codegen.arithmetic(m[1], m[0], m[2])
#replace first calculation with its result
m[0:3] = [reg]
return m[0]
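# Illustrative note (not in the original source): for an expression such as
# `a * b / c`, `mul` arrives as [idx_a, '*', idx_b, '/', idx_c]; each pass of the
# loop above folds the leftmost triple into one result register, so the list
# shrinks to [reg1, '/', idx_c] and finally to [reg2], which is returned.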
def numexp_action(self, text, loc, num):
"""Code executed after recognising a numexp expression (something +|- something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("NUM_EXP:",num)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#iterate through all additions/subtractions
n = list(num)
while len(n) > 1:
if not self.symtab.same_types(n[0], n[2]):
raise SemanticException("Invalid opernads to binary '%s'" % n[1])
reg = self.codegen.arithmetic(n[1], n[0], n[2])
#replace first calculation with its result
n[0:3] = [reg]
return n[0]
def function_call_prepare_action(self, text, loc, fun):
"""Code executed after recognising a function call (type and function name)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_PREP:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION)
if index is None:
raise SemanticException("'%s' is not a function" % fun.name)
#save any previous function call data (for nested function calls)
self.function_call_stack.append(self.function_call_index)
self.function_call_index = index
self.function_arguments_stack.append(self.function_arguments[:])
del self.function_arguments[:]
self.codegen.save_used_registers()
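# Illustrative note (not in the original source): the two stacks above make nested
# calls such as `f(g(x))` safe; when `g(` is recognised while the arguments of `f`
# are still being collected, f's call index and argument list are pushed here and
# restored in function_call_action below once `g` completes.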
def argument_action(self, text, loc, arg):
"""Code executed after recognising each of function's arguments"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("ARGUMENT:",arg.exp)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
arg_ordinal = len(self.function_arguments)
#check argument's type
if not self.symtab.same_type_as_argument(arg.exp, self.function_call_index, arg_ordinal):
raise SemanticException("Incompatible type for argument %d in '%s'" % (arg_ordinal + 1, self.symtab.get_name(self.function_call_index)))
self.function_arguments.append(arg.exp)
def function_call_action(self, text, loc, fun):
"""Code executed after recognising the whole function call"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_CALL:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#check number of arguments
if len(self.function_arguments) != self.symtab.get_attribute(self.function_call_index):
raise SemanticException("Wrong number of arguments for function '%s'" % fun.name)
#arguments should be pushed to stack in reverse order
self.function_arguments.reverse()
self.codegen.function_call(self.function_call_index, self.function_arguments)
self.codegen.restore_used_registers()
return_type = self.symtab.get_type(self.function_call_index)
#restore previous function call data
self.function_call_index = self.function_call_stack.pop()
self.function_arguments = self.function_arguments_stack.pop()
register = self.codegen.take_register(return_type)
#move result to a new free register, to allow the next function call
self.codegen.move(self.codegen.take_function_register(return_type), register)
return register
def relexp_action(self, text, loc, arg):
"""Code executed after recognising a relexp expression (something relop something)"""
if DEBUG > 0:
print("REL_EXP:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
exshared.setpos(loc, text)
if not self.symtab.same_types(arg[0], arg[2]):
raise SemanticException("Invalid operands for operator '{0}'".format(arg[1]))
self.codegen.compare(arg[0], arg[2])
#return relational operator's code
self.relexp_code = self.codegen.relop_code(arg[1], self.symtab.get_type(arg[0]))
return self.relexp_code
def andexp_action(self, text, loc, arg):
"""Code executed after recognising a andexp expression (something and something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("AND+EXP:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
self.codegen.jump(self.relexp_code, True, label)
self.andexp_code = self.relexp_code
return self.andexp_code
def logexp_action(self, text, loc, arg):
"""Code executed after recognising logexp expression (something or something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("LOG_EXP:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
label = self.codegen.label("true{0}".format(self.label_number), True, False)
self.codegen.jump(self.relexp_code, False, label)
self.codegen.newline_label("false{0}".format(self.false_label_number), True, True)
self.false_label_number += 1
def if_begin_action(self, text, loc, arg):
"""Code executed after recognising an if statement (if keyword)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_BEGIN:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.false_label_number += 1
self.label_number = self.false_label_number
self.codegen.newline_label("if{0}".format(self.label_number), True, True)
def if_body_action(self, text, loc, arg):
"""Code executed after recognising if statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
self.codegen.jump(self.relexp_code, True, label)
#generate 'true' label (executes if condition is satisfied)
self.codegen.newline_label("true{0}".format(self.label_number), True, True)
#save label numbers (needed for nested if/while statements)
self.label_stack.append(self.false_label_number)
self.label_stack.append(self.label_number)
def if_else_action(self, text, loc, arg):
"""Code executed after recognising if statement's else body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_ELSE:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#jump to exit after all statements for true condition are executed
self.label_number = self.label_stack.pop()
label = self.codegen.label("exit{0}".format(self.label_number), True, False)
self.codegen.unconditional_jump(label)
#generate final 'false' label (executes if condition isn't satisfied)
self.codegen.newline_label("false{0}".format(self.label_stack.pop()), True, True)
self.label_stack.append(self.label_number)
def if_end_action(self, text, loc, arg):
"""Code executed after recognising a whole if statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.newline_label("exit{0}".format(self.label_stack.pop()), True, True)
def while_begin_action(self, text, loc, arg):
"""Code executed after recognising a while statement (while keyword)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("WHILE_BEGIN:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.false_label_number += 1
self.label_number = self.false_label_number
self.codegen.newline_label("while{0}".format(self.label_number), True, True)
def while_body_action(self, text, loc, arg):
"""Code executed after recognising while statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("WHILE_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
self.codegen.jump(self.relexp_code, True, label)
#generate 'true' label (executes if condition is satisfied)
self.codegen.newline_label("true{0}".format(self.label_number), True, True)
self.label_stack.append(self.false_label_number)
self.label_stack.append(self.label_number)
def while_end_action(self, text, loc, arg):
"""Code executed after recognising a whole while statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("WHILE_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#jump to condition checking after while statement body
self.label_number = self.label_stack.pop()
label = self.codegen.label("while{0}".format(self.label_number), True, False)
self.codegen.unconditional_jump(label)
#generate final 'false' label and exit label
self.codegen.newline_label("false{0}".format(self.label_stack.pop()), True, True)
self.codegen.newline_label("exit{0}".format(self.label_number), True, True)
def program_end_action(self, text, loc, arg):
"""Checks if there is a 'main' function and the type of 'main' function"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("PROGRAM_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.lookup_symbol("main",SharedData.KINDS.FUNCTION)
if index is None:
raise SemanticException("Undefined reference to 'main'", False)
elif self.symtab.get_type(index) != SharedData.TYPES.INT:
self.warning("Return type of 'main' | |
else:
rfdlist.remove(self.rfd)
self.flag_eof = True
if pty.STDIN_FILENO in r:
try:
data = None
data = os.read(pty.STDIN_FILENO, 1024)
except OSError, e:
# the subprocess may have closed before we get to reading it
if e.errno != errno.EIO:
raise
if self.debug and os.isatty(self.wfd):
wfd_mode = tty.tcgetattr(self.wfd)
log('stdin wfd mode = ' + repr(wfd_mode), f = self.debug)
# in BSD, you can still read '' from rfd, so never use `data is not None` here
if data:
if input_filter: data = input_filter(data)
i = input_filter and -1 or data.rfind(escape_character)
if i != -1: data = data[:i]
if not os.isatty(self.wfd): # we must do the translation when tty does not help
data = data.replace('\r', '\n')
# also echo back ourselves; since we are echoing what we typed by hand, there is
# no need to wrap with print_write by default, unless raw_rw is set to False
stdout(raw_rw and data or self._print_write(data))
while data != b'' and self.isalive():
n = self._write(data)
data = data[n:]
if i != -1:
self.end(force_close = True)
break
else:
self.end(force_close = True)
rfdlist.remove(pty.STDIN_FILENO)
while True: # read the final buffered output; the process is probably not alive any more, so loop until EOF (fixes the pipe-stdout interact-mode bug)
r, w, e = self.__select([self.rfd], [], [], timeout = self.close_delay)
if self.rfd in r:
try:
data = None
data = os.read(self.rfd, 1024)
except OSError, e:
if e.errno != errno.EIO:
raise
# in BSD, you can still read '' from rfd, so never use `data is not None` here
if data:
if output_filter: data = output_filter(data)
stdout(raw_rw and data or self._print_read(data))
else:
self.flag_eof = True
break
else:
break
finally:
if not input_filter and os.isatty(pty.STDIN_FILENO):
tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode)
if os.isatty(self.wfd):
self.ttyraw(self.wfd)
def flush(self):
"""
no-op; kept only so this object remains file-like
"""
pass
def isatty(self):
'''This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. '''
return os.isatty(self.rfd)
def ttyraw(self, fd, when = tty.TCSAFLUSH, echo = False, raw_in = True, raw_out = False):
mode = tty.tcgetattr(fd)[:]
if raw_in:
mode[tty.IFLAG] = mode[tty.IFLAG] & ~(tty.BRKINT | tty.ICRNL | tty.INPCK | tty.ISTRIP | tty.IXON)
mode[tty.CFLAG] = mode[tty.CFLAG] & ~(tty.CSIZE | tty.PARENB)
mode[tty.CFLAG] = mode[tty.CFLAG] | tty.CS8
if echo:
mode[tty.LFLAG] = mode[tty.LFLAG] & ~(tty.ICANON | tty.IEXTEN | tty.ISIG)
else:
mode[tty.LFLAG] = mode[tty.LFLAG] & ~(tty.ECHO | tty.ICANON | tty.IEXTEN | tty.ISIG)
if raw_out:
mode[tty.OFLAG] = mode[tty.OFLAG] & ~(tty.OPOST)
mode[tty.CC][tty.VMIN] = 1
mode[tty.CC][tty.VTIME] = 0
tty.tcsetattr(fd, when, mode)
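# Illustrative note (not in the original source): with the defaults
# (echo=False, raw_in=True, raw_out=False), ttyraw(fd) disables canonical input,
# echo and signal generation on fd while leaving output post-processing intact,
# i.e. roughly what tty.setraw does for the input side only; VMIN=1/VTIME=0 makes
# reads block until a single byte arrives.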
def mode(self):
if not hasattr(self, '_io_mode'):
if hostport_tuple(self.target) or isinstance(self.target, socket.socket):
self._io_mode = SOCKET
else:
# TODO: add more check condition
self._io_mode = PROCESS
return self._io_mode
def __select(self, iwtd, owtd, ewtd, timeout=None):
'''This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). '''
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select(iwtd, owtd, ewtd, timeout)
except select.error:
err = sys.exc_info()[1]
if err[0] == errno.EINTR:
# if we loop back we have to subtract the
# amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return([], [], [])
else:
# something else caused the select.error, so
# this actually is an exception.
raise
def writelines(self, sequence):
n = 0
for s in sequence:
n += self.writeline(s)
return n
def writeline(self, s = ''):
return self.write(s + os.linesep)
def write(self, s):
if not s: return 0
if self.mode() == SOCKET:
if self.print_write: stdout(self._print_write(s))
self.sock.sendall(s)
return len(s)
elif self.mode() == PROCESS:
#if not self.writable(): raise Exception('subprocess stdin not writable')
time.sleep(self.write_delay)
if not isinstance(s, bytes): s = s.encode('utf-8')
ret = os.write(self.wfd, s)
# don't rely on echoed-back chars, because
# 1. input/output will not be any cleaner; they are always in a mess
# 2. this keeps a unified interface for pipe/tty writes
# 3. echoed-back characters turn control chars into ^@ ^A ^B ^C, which is ugly
if self.print_write: stdout(self._print_write(s))
return ret
def end(self, force_close = False):
'''
end of writing stream, but we can still read
'''
if self.mode() == SOCKET:
self.sock.shutdown(socket.SHUT_WR)
else:
if not os.isatty(self.wfd): # pipes can be closed harmlessly
os.close(self.wfd)
# on Mac, closing the pty master fd does not cause slave fd input/output errors, so let's do it!
elif platform.system() == 'Darwin':
os.close(self.wfd)
else: # assume Linux here
# according to http://linux.die.net/man/3/cfmakeraw
# set min = 0 and time > 0, will cause read timeout and return 0 to indicate EOF
# but the tricky thing here is, if child read is invoked before this
# it will still block forever, so you have to call end before that happens
mode = tty.tcgetattr(self.wfd)[:]
mode[tty.CC][tty.VMIN] = 0
mode[tty.CC][tty.VTIME] = 1
tty.tcsetattr(self.wfd, tty.TCSAFLUSH, mode)
if force_close:
time.sleep(self.close_delay)
os.close(self.wfd) # might cause EIO (input/output error)! use force_close at your own risk
return
def close(self, force = True):
'''
close and clean up, nothing can and should be done after closing
'''
if self.closed:
return
if self.mode() == SOCKET:
if self.sock:
self.sock.close()
self.sock = None
else:
try:
os.close(self.wfd)
except:
pass # may already be closed in write_eof
os.close(self.rfd)
time.sleep(self.close_delay)
if self.isalive():
if not self.terminate(force):
raise Exception('Could not terminate child process')
self.flag_eof = True
self.rfd = -1
self.wfd = -1
self.closed = True
def read(self, size = None, timeout = -1):
if size == 0:
return str()
elif size is None or size < 0:
self.read_loop(searcher_re(self.compile_pattern_list(EOF)), timeout = timeout)
return self.before
cre = re.compile('.{%d}' % size, re.DOTALL)
index = self.read_loop(searcher_re(self.compile_pattern_list([cre, EOF])), timeout = timeout)
if index == 0:
assert self.before == ''
return self.after
return self.before
def read_until_timeout(self, timeout = 0.05):
try:
incoming = self.buffer
while True:
c = self.read_nonblocking(2048, timeout)
incoming = incoming + c
if self.mode() == PROCESS: time.sleep(0.0001)
except EOF:
err = sys.exc_info()[1]
self.buffer = str()
self.before = str()
self.after = EOF
self.match = incoming
self.match_index = None
raise EOF(str(err) + '\n' + str(self))
except TIMEOUT:
self.buffer = str()
self.before = str()
self.after = TIMEOUT
self.match = incoming
self.match_index = None
return incoming
except:
self.before = str()
self.after = None
self.match = incoming
self.match_index = None
raise
read_eager = read_until_timeout
def readable(self):
return self.__select([self.rfd], [], [], 0) == ([self.rfd], [], [])
def readline(self, size = -1):
if size == 0:
return str()
lineseps = [b'\r\n', b'\n', EOF]
index = self.read_loop(searcher_re(self.compile_pattern_list(lineseps)))
if index < 2:
return self.before + lineseps[index]
else:
return self.before
read_line = readline
def readlines(self, sizehint = -1):
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def read_until(self, pattern_list, timeout = -1, searchwindowsize = None):
if (isinstance(pattern_list, basestring) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
def prepare_pattern(pattern):
if pattern in (TIMEOUT, EOF):
return pattern
if isinstance(pattern, basestring):
return pattern
self._pattern_type_err(pattern)
try:
pattern_list = iter(pattern_list)
except TypeError:
self._pattern_type_err(pattern_list)
pattern_list = [prepare_pattern(p) for p in pattern_list]
matched = self.read_loop(searcher_string(pattern_list), timeout, searchwindowsize)
ret = self.before
if isinstance(self.after, basestring):
ret += self.after # after is the matched string, before is the string before this match
return ret # be compatible with telnetlib.read_until
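# Hypothetical usage sketch (the `io` instance and prompt are made up): read up to
# a shell prompt, send a command, then read the reply; read_until returns
# everything up to and including the match, mirroring telnetlib.read_until.
# io.read_until('$ ')
# io.writeline('uname -a')
# print(io.read_until('$ '))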
def read_until_re(self, pattern, timeout = -1, searchwindowsize = None):
compiled_pattern_list = self.compile_pattern_list(pattern)
matched = self.read_loop(searcher_re(compiled_pattern_list), timeout, searchwindowsize)
ret = self.before
if isinstance(self.after, basestring):
ret += self.after
return ret
def read_loop(self, searcher, timeout=-1, searchwindowsize = None):
'''This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. '''
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
try:
incoming = self.buffer
freshlen = len(incoming)
while True:
# container/pyf/models/oldies/AcreditationEntities.py
from sqlalchemy import Column, String, BigInteger, Integer, DateTime, ForeignKey, Sequence
import datetime
from functools import cache
import sqlengine.sqlengine as SqlEngine
from . import BaseModel
@cache # caching effectively turns this factory into a singleton
def GetModels(BaseModel=BaseModel.getBaseModel(), unitedSequence=Sequence('all_id_seq')):
"""create elementary models for information systems
Parameters
----------
BaseModel
represents the declarative_base instance from SQLAlchemy
unitedSequence : Sequence
represents a method for generating keys (usually ids) for database entities
Returns
-------
(ProgramModel, SubjectModel, SubjectSemesterModel, TopicModel, SubjectUserRoleModel, SubjectUserRoleTypeModel, ProgramUserRoleTypeModel)
tuple of models based on BaseModel, table names are hardcoded
"""
#assert not(unitedSequence is None), "unitedSequence must be defined"
print('Base models definition (ProgramModel, SubjectModel, SubjectSemesterModel, TopicModel, SubjectUserRoleModel, SubjectUserRoleTypeModel, ProgramUserRoleTypeModel)')
class ProgramModel(BaseModel):
__tablename__ = 'programs'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
lastchange = Column(DateTime, default=datetime.datetime.now)
externalId = Column(BigInteger, index=True)
class SubjectModel(BaseModel):
__tablename__ = 'subjects'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
lastchange = Column(DateTime, default=datetime.datetime.now)
externalId = Column(String, index=True)
class SubjectSemesterModel(BaseModel):
__tablename__ = 'subjectsemesters'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
lastchange = Column(DateTime, default=datetime.datetime.now)
class TopicModel(BaseModel):
__tablename__ = 'topics'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
class SubjectUserRoleModel(BaseModel):
__tablename__ = 'subjectuserroles'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
class SubjectUserRoleTypeModel(BaseModel):
__tablename__ = 'subjectuserroletypes'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
class ProgramUserRoleTypeModel(BaseModel):
__tablename__ = 'programuserroletypes'
id = Column(BigInteger, unitedSequence, primary_key=True)
name = Column(String)
return ProgramModel, SubjectModel, SubjectSemesterModel, TopicModel, SubjectUserRoleModel, SubjectUserRoleTypeModel, ProgramUserRoleTypeModel
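# Hypothetical usage sketch (not in the original module): the factory is cached,
# so repeated calls hand back the very same classes.
# ProgramModel, SubjectModel, *_ = GetModels()
# assert GetModels() is GetModels()  # @cache makes the returned tuple a singleton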
from . import Relations
from . import BaseEntities
@cache
def BuildRelations():
UserModel, GroupModel, RoleModel, GroupTypeModel, RoleTypeModel = BaseEntities.GetModels()
ProgramModel, SubjectModel, SubjectSemesterModel, TopicModel, SubjectUserRoleModel, SubjectUserRoleTypeModel, ProgramUserRoleTypeModel = GetModels()
print('building relations between base models')
Relations.defineRelation1N(ProgramModel, SubjectModel)
Relations.defineRelation1N(SubjectModel, SubjectSemesterModel)
Relations.defineRelation1N(SubjectSemesterModel, TopicModel)
Relations.defineRelationNM(UserModel, SubjectModel, tableAItemName='grantingsubjects', tableBItemName='guarantors')
print('building relations between base models finished')
#defineRelationNM(BaseModel, EventModel, UserModel, 'teachers', 'events')
pass
from types import MappingProxyType
@cache
def ensureData(SessionMaker):
def ensureDataItem(session, Model, name):
itemRecords = session.query(Model).filter(Model.name == name).all()
itemRecordsLen = len(itemRecords)
if itemRecordsLen == 0:
itemRecord = Model(name=name)
session.add(itemRecord)
session.commit()
else:
assert itemRecordsLen == 1, f'Database has inconsistencies {Model}, {name}'
itemRecord = itemRecords[0]
return itemRecord.id
ProgramModel, SubjectModel, SubjectSemesterModel, TopicModel, SubjectUserRoleModel, SubjectUserRoleTypeModel, ProgramUserRoleTypeModel = GetModels()
session = SessionMaker()
try:
guaranteeSubjectTypeId = ensureDataItem(session, SubjectUserRoleTypeModel, 'guarantee')
teacherTypeId = ensureDataItem(session, SubjectUserRoleTypeModel, 'teacher')
guaranteeDeputySubjectTypeId = ensureDataItem(session, SubjectUserRoleTypeModel, 'guarantee deputy')
guaranteeProgramTypeId = ensureDataItem(session, ProgramUserRoleTypeModel, 'guarantee')
guaranteeDeputyProgramTypeId = ensureDataItem(session, ProgramUserRoleTypeModel, 'guarantee deputy')
result = {
'guaranteeSubjectTypeId': guaranteeSubjectTypeId,
'teacherTypeId': teacherTypeId,
'guaranteeDeputySubjectTypeId': guaranteeDeputySubjectTypeId,
'guaranteeProgramTypeId': guaranteeProgramTypeId,
'guaranteeDeputyProgramTypeId': guaranteeDeputyProgramTypeId
}
finally:
session.close()
return MappingProxyType(result)
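# Hypothetical usage sketch: ensureData is idempotent thanks to @cache and the
# per-name lookups above, so it is safe to call at application startup.
# ids = ensureData(SessionMaker)
# teacher_role_id = ids['teacherTypeId']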
import random
def PopulateRandomData(SessionMaker):
session = SessionMaker()
ProgramModel, SubjectModel, SubjectSemesterModel, TopicModel, *_ = GetModels() # GetModels returns 7 models; only the first 4 are used here
def randomizedTopic(subject, semester, index):
randomName = f'{subject.name}-{semester.name}-{index+1}'
record = TopicModel(name=randomName)
session.add(record)
session.commit()
pass
def randomizedSemester(subject):
record = SubjectSemesterModel(name='')
session.add(record)
session.commit()
topicCount = random.randrange(1, 3)
for index in range(topicCount):
randomizedTopic(subject, record, index)
session.commit()
pass
subjectNames = randomSubjectNames()
def randomizedSubject(program):
subjectRecord = SubjectModel(name=random.choice(subjectNames))
session.add(subjectRecord)
program.subjects.append(subjectRecord)
session.commit()
semestersCount = random.randrange(10, 15)
for _ in range(semestersCount):
randomizedSemester(subjectRecord)
pass
strsA = ['IT', 'EL', 'MIL', 'GEO', 'ST']
strsB = ['Bc', 'Mgr', 'Dr']
strsC = ['P', 'K', 'O']
def randomizedProgram():
year = random.randrange(2015, 2020)
randomName = f'{random.choice(strsA)}-{random.choice(strsB)}-{random.choice(strsC)}/{year}'
programRecord = ProgramModel(name=randomName)
session.add(programRecord)
session.commit()
subjectsCount = random.randrange(10, 15)
for _ in range(subjectsCount):
randomizedSubject(programRecord)
pass
try:
randomizedProgram()
pass
finally:
session.close()
pass
def randomSubjectNames():
data = randomSubjectNamesStr()
result = [item.strip() for item in data.replace(' (v angličtině)', '').split('\n')]
resultArray = [item[:-1].rstrip() if item and item[-1] in ['1', '2'] else item for item in result]
return resultArray
def randomSubjectNamesStr():
return """3D optická digitalizace 1
Agentní a multiagentní systémy
Aktuální témata grafického designu
Algebra
Algoritmy
Algoritmy (v angličtině)
Analogová elektronika 1
Analogová elektronika 2
Analogová technika
Analýza a návrh informačních systémů
Analýza binárního kódu
Analýza systémů založená na modelech
Anglická konverzace na aktuální témata
Anglická konverzace na aktuální témata
Angličtina 1: mírně pokročilí 1
Angličtina 2: mírně pokročilí 2
Angličtina 3: středně pokročilí 1
Angličtina 3: středně pokročilí 1
Angličtina 4: středně pokročilí 2
Angličtina 4: středně pokročilí 2
Angličtina pro doktorandy
Angličtina pro Evropu
Angličtina pro Evropu
Angličtina pro IT
Angličtina pro IT
Angličtina: praktický kurz obchodní konverzace a prezentace
Aplikace paralelních počítačů
Aplikovaná herní studia - výzkum a design
Aplikované evoluční algoritmy
Architektura 20. století
Architektury výpočetních systémů
Audio elektronika
Automatizované testování a dynamická analýza
Autorská práva - letní
Bakalářská práce
Bakalářská práce Erasmus (v angličtině)
Bayesovské modely pro strojové učení (v angličtině)
Bezdrátové a mobilní sítě
Bezpečná zařízení
Bezpečnost a počítačové sítě
Bezpečnost informačních systémů
Bezpečnost informačních systémů a kryptografie
Bioinformatika
Bioinformatika
Biologií inspirované počítače
Biometrické systémy
Biometrické systémy (v angličtině)
Blockchainy a decentralizované aplikace
CCNA Kybernetická bezpečnost (v angličtině)
České umění 1. poloviny 20. století v souvislostech - zimní
České umění 2. poloviny 20. století v souvislostech - letní
Chemoinformatika
Číslicové zpracování akustických signálů
Číslicové zpracování signálů (v angličtině)
CNC obrábění / Roboti v umělecké praxi
Daňový systém ČR
Databázové systémy
Databázové systémy (v angličtině)
Dějiny a filozofie techniky
Dějiny a kontexty fotografie 1
Dějiny a kontexty fotografie 2
Dějiny designu 1 - letní
Dějiny designu 1 - zimní
Desktop systémy Microsoft Windows
Digitální forenzní analýza (v angličtině)
Digitální marketing a sociální média (v angličtině)
Digitální sochařství - 3D tisk 1
Digitální sochařství - 3D tisk 2
Diplomová práce
Diplomová práce (v angličtině)
Diplomová práce Erasmus (v angličtině)
Diskrétní matematika
Dynamické jazyky
Ekonomie informačních produktů
Elektroakustika 1
Elektronický obchod (v angličtině)
Elektronika pro informační technologie
Elektrotechnický seminář
Evoluční a neurální hardware
Evoluční výpočetní techniky
Filozofie a kultura
Finanční analýza
Finanční management pro informatiky
Finanční trhy
Formální analýza programů
Formální jazyky a překladače
Formální jazyky a překladače (v angličtině)
Funkcionální a logické programování
Funkční verifikace číslicových systémů
Fyzika 1 - fyzika pro audio inženýrství
Fyzika v elektrotechnice (v angličtině)
Fyzikální optika
Fyzikální optika (v angličtině)
Fyzikální seminář
Grafická a zvuková rozhraní a normy
Grafická uživatelská rozhraní v Javě
Grafická uživatelská rozhraní v Javě (v angličtině)
Grafická uživatelská rozhraní v X Window
Grafické a multimediální procesory
Grafové algoritmy
Grafové algoritmy (v angličtině)
Hardware/Software Codesign
Hardware/Software Codesign (v angličtině)
Herní studia
Informační systémy
Informační výchova a gramotnost
Inteligentní systémy
Inteligentní systémy
Internetové aplikace
Inženýrská pedagogika a didaktika
Inženýrská pedagogika a didaktika
Jazyk C
Klasifikace a rozpoznávání
Kódování a komprese dat
Komunikační systémy pro IoT
Konvoluční neuronové sítě
Kritická analýza digitálních her
Kruhové konzultace
Kryptografie
Kultura projevu a tvorba textů
Kultura projevu a tvorba textů
Kurz pornostudií
Lineární algebra
Lineární algebra
Logika
Makroekonomie
Management
Management projektů
Manažerská komunikace a prezentace
Manažerská komunikace a prezentace
Manažerské vedení lidí a řízení času
Manažerské vedení lidí a řízení času
Matematická analýza 1
Matematická analýza 2
Matematická logika
Matematické struktury v informatice (v angličtině)
Matematické výpočty pomocí MAPLE
Matematické základy fuzzy logiky
Matematický seminář
Matematický software
Matematika 2
Maticový a tenzorový počet
Mechanika a akustika
Mikroekonomie
Mikroprocesorové a vestavěné systémy
Mikroprocesorové a vestavěné systémy (v angličtině)
Mobilní roboty
Modelování a simulace
Modelování a simulace
Moderní matematické metody v informatice
Moderní metody zobrazování 3D scény
Moderní metody zpracování řeči
Moderní teoretická informatika
Moderní trendy informatiky (v angličtině)
Molekulární biologie
Molekulární genetika
Multimédia
Multimédia (v angličtině)
Multimédia v počítačových sítích
Návrh a implementace IT služeb
Návrh a realizace elektronických přístrojů
Návrh číslicových systémů
Návrh číslicových systémů (v angličtině)
Návrh kyberfyzikálních systémů (v angličtině)
Návrh počítačových systémů
Návrh vestavěných systémů
Návrh, správa a bezpečnost
Operační systémy
Optické sítě
Optika
Optimalizace
Optimalizační metody a teorie hromadné obsluhy
Optimální řízení a identifikace
Paralelní a distribuované algoritmy
Paralelní výpočty na GPU
Pedagogická psychologie
Pedagogická psychologie
Plošné spoje a povrchová montáž
Počítačová fyzika I
Počítačová fyzika II
Počítačová grafika
Počítačová grafika
Počítačová grafika (v angličtině)
Počítačová podpora konstruování
Počítačové komunikace a sítě
Počítačové vidění (v angličtině)
Počítačový seminář
Podnikatelská laboratoř
Podnikatelské minimum
Pokročilá bioinformatika
Pokročilá matematika
Pokročilá počítačová grafika (v angličtině)
Pokročilá témata administrace operačního systému Linux
Pokročilé asemblery
Pokročilé biometrické systémy
Pokročilé číslicové systémy
Pokročilé databázové systémy
Pokročilé databázové systémy (v angličtině)
Pokročilé informační systémy
Pokročilé komunikační systémy (v angličtině)
Pokročilé operační systémy
Pokročilé směrování v páteřních sítích (ENARSI)
Pokročilé techniky návrhu číslicových systémů
Pokročilý návrh a zabezpečení podnikových sítí
Praktické aspekty vývoje software
Praktické paralelní programování
Pravděpodobnost a statistika
Právní minimum
Právní minimum
Právo informačních systémů
Přenos dat, počítačové sítě a protokoly
Přenos dat, počítačové sítě a protokoly (v angličtině)
Principy a návrh IoT systémů
Principy programovacích jazyků a OOP
Principy programovacích jazyků a OOP (v angličtině)
Principy syntézy testovatelných obvodů
Programovací seminář
Programování na strojové úrovni
Programování v .NET a C#
Programování zařízení Apple
Projektová praxe 1
Projektová praxe 1
Projektová praxe 1 (v angličtině)
Projektová praxe 1 (v angličtině)
Projektová praxe 1 (v angličtině)
Projektová praxe 1 (v angličtině)
Projektová praxe 2
Projektová praxe 2
Projektová praxe 2 (v angličtině)
Projektová praxe 2 (v angličtině)
Projektová praxe 3
Projektování datových sítí
Projektový manažer
Prostředí distribuovaných aplikací
Rádiová komunikace
Regulované gramatiky a automaty
Rétorika
Rétorika
Řízení a regulace 1
Řízení a regulace 2
Robotika (v angličtině)
Robotika a manipulátory
Robotika a zpracování obrazu
Semestrální projekt
Semestrální projekt
Semestrální projekt (v angličtině)
Semestrální projekt Erasmus (v angličtině)
Semestrální projekt Erasmus (v angličtině)
Seminář C#
Seminář C++
Seminář diskrétní matematiky a logiky
Seminář Java
Seminář Java (v angličtině)
Seminář VHDL
Senzory a měření
Serverové systémy Microsoft Windows
Signály a systémy
Simulační nástroje a techniky
Síťová kabeláž a směrování (CCNA1+CCNA2)
Síťové aplikace a správa sítí
Skriptovací jazyky
Složitost (v angličtině)
Směrování a přepínání v páteřních sítích (ENCOR)
Soft Computing
Španělština: začátečníci 1/2
Španělština: začátečníci 2/2
Správa serverů IBM zSeries
Statická analýza a verifikace
Statistika a pravděpodobnost
Statistika, stochastické procesy, operační výzkum
Strategické řízení informačních systémů
Strojové učení a rozpoznávání
Systémová biologie
Systémy odolné proti poruchám
Systémy odolné proti poruchám
Systémy pracující v reálném čase (v angličtině)
Technologie sítí LAN a WAN (CCNA3+4)
Teoretická informatika
Teoretická informatika (v angličtině)
Teorie a aplikace Petriho sítí
Teorie her
Teorie kategorií v informatice
Teorie programovacích jazyků
Testování a dynamická analýza
Tvorba aplikací pro mobilní zařízení (v angličtině)
Tvorba uživatelských rozhraní
Tvorba uživatelských rozhraní (v angličtině)
Tvorba webových stránek
Tvorba"""
def triedrdf():
"""
The triedrdf module gathers pattern-recognition methods:
bayesrule : Bayes decision rule
kppv : k-nearest-neighbours algorithm
kmoys : k-means algorithm
kclassif : classification of data by proximity to prototypes
"""
return None
import time
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import triedpy.triedtools as tls
#========================= Bayes rule ==============================
def bayesrule(Prob,loss=None) :
''' BAYESCLASSES = bayesrule(Prob,loss) is the vector of classification.
| p is the (nxc) matrix of the posterior probabilities.
| loss is an optional (cxc) matrix of classication costs where loss(i,j)
| is the cost of classifying in class j a pattern of class i.
| If loss is omitted, (0,1) loss are used by default.
'''
nX, c = np.shape(Prob)
if loss is None :
loss = np.ones((c,c)) - np.eye(c,c);
# classification
Risk = np.dot(Prob,loss);
BAYESCLASSES = np.argmin(Risk.T,0)+1;
return BAYESCLASSES
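# Worked example (illustrative, not from the original file): with the default 0/1
# loss, bayesrule reduces to picking the class with the highest posterior.
# Prob = np.array([[0.9, 0.1],
#                  [0.2, 0.8]])
# Risk = Prob.dot(1 - np.eye(2))  -> [[0.1, 0.9], [0.8, 0.2]]
# bayesrule(Prob)                 -> array([1, 2])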
#==================== k nearest neighbours ========================
def kppv (X,Xi,labelXi,k,dist=0,votopt=0) :
'''XCLASSE = kppv (X,Xi,labelXi,k,dist,votopt)
| k-nearest-neighbours (kppv) algorithm
| Inputs:
| X : data set (nX x d matrix) whose elements (rows) are to be
| classified by the kppv algorithm (test set)
| Xi : reference (training) set; (nXi x d) matrix
| labelXi : class indices of the reference set elements. These indices
| must start at 1 (>0), not at zero; column vector (nXi x 1)
| k : number of nearest neighbours to consider
| dist : distance to use: 0: Euclidean (default); otherwise: Mahalanobis
| votopt : tie-breaking option for the majority vote:
| 0 : on a tie the first class is kept (this is the default),
| otherwise a random draw is performed.
| Output:
| XCLASSE : classes of the elements of X; a column vector (nX x 1)
'''
if min(labelXi)<=0 :
print("kppv: Les indices de classe de référence (labelXi) doivent être > 0");
sys.exit(0);
nX, d1 = np.shape(X);
nXi, d2 = np.shape(Xi);
c = max(labelXi);
if d1 != d2 :
print("kppv: X doit avoir la même dimension que Xi");
sys.exit(0);
if np.size(Xi,0) != np.size(labelXi) :
print("kppv, Xi et labelXi doivent avoir le même nombre d'éléments (i.e. de lignes)");
sys.exit(0);
labelXi = labelXi-1; # because Python indices start at 0
XCLASSE = np.zeros(nX); # kppv classification of the elements
if dist!=0 : # MAHALANOBIS distance
SIGMA = np.zeros((c,d1,d2)); # init of the per-class COVARIANCE matrices
for i in np.arange(c) : # compute the per-class COVARIANCE matrices
ICi = np.where(labelXi==i)[0]; # indices of the elements of class i
Xiclasse = Xi[ICi,:]; # reference set for class i
sigma = np.cov(Xiclasse.T, ddof=1);
sigmamoins = np.linalg.inv(sigma);
SIGMA[i,:,:]= sigmamoins;
# DECISION
for i in np.arange(nX) : # for each element of the TEST set
D = np.zeros(nXi);
if dist==0 : # Euclidean distance (the metric I is omitted)
for j in np.arange(nXi) : # for each element of the reference set
D[j] = np.dot(X[i,:]-Xi[j,:] , X[i,:]-Xi[j,:]);
else : # Mahalanobis distance
for j in np.arange(nXi) : # for each element of the reference set
cl = labelXi[j];
M = np.dot(X[i,:]-Xi[j,:], SIGMA[cl,:,:]);
D[j] = np.dot(M, X[i,:]-Xi[j,:]);
# Majority vote
# sort the distances from smallest to largest
I = sorted(range(len(D)), key=lambda k: D[k])
C = labelXi[I]; # order the classes according to this sort
classeppv = C[0:k]; # keep the first k classes, which thus correspond
# to the k nearest neighbours in the reference set
# majority vote:
XCLASSE[i] = tls.avotemaj(classeppv,votopt=votopt)
XCLASSE = XCLASSE+1; # back to the initial class numbering.
return XCLASSE
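# Hypothetical usage sketch (shapes and data made up): classify 2-D test points
# against a labelled reference set with a 3-neighbour Euclidean vote.
# Xtest = np.random.randn(50, 2); Xref = np.random.randn(200, 2)
# labels = np.random.randint(1, 4, 200)   # classes numbered from 1, as required
# pred = kppv(Xtest, Xref, labels, k=3)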
#=========================== k-means ==============================
def kmoys (X,k,spause=0,pvisu=0,cmap=cm.jet,markersize=8,fontsize=11) :
'''PROTO, CLASSE = kmoys (X,k,spause,pvisu)
| k-means algorithm
| Inputs:
| X : the data set. dim(X)=(N,p)
| k : the number of classes (<=N).
| [spause]: (optional) number of seconds (possibly fractional) to pause,
| slowing the code down enough to watch the prototypes evolve
| on the figure. Pass 0 for an unslowed run (this is the
| default value).
| [pvisu] : (optional) vector of dimension 2 giving the visualisation
| plane. pvisu[0] is the abscissa and pvisu[1] the ordinate.
| By default the 1-2 plane is used.
| cmap : the colour map
| markersize : the marker size
| fontsize : the text size
| Outputs:
| PROTO : the matrix of prototype coordinates. dim(proto)=(k,p)
| CLASSE : a column vector holding, for each individual, the number of
| the prototype that defines its class.
'''
N = np.size(X,0);
# visualisation plane
if pvisu == 0 :
a = 0; o = 1; # default
else :
a=pvisu[0]-1; o=pvisu[1]-1;
# initialise the vector of the individuals' classes
CLASSE = np.zeros(N);
# colour map for plotting the prototypes and their trajectories
#cmap = plt.cm.jet
Tcol = cmap(np.arange(1,256,round(256/k))) # k rows of colours
# draw the k prototypes at random
Iprot = np.random.permutation(N);
PROTO = X[Iprot[range(k)],:]; #print("\nproto=",proto);
prevprot = PROTO;
plt.figure();
plt.ion() # interactive graphics on
plt.plot(X[:,a], X[:,o],'+k');
# Loop initialisation ---------------
oldcritere = -1;
critere = 0;
print(" Critère Critère normalisé par");
print(" le nombre de données:");
#
# while convergence has not been reached,
# assign each point to the nearest class
while oldcritere != critere :
oldcritere = critere;
# compute a matrix of within-class inertias
distance = np.zeros((N,k))
for i in range(N) :
for j in range(k) :
C = X[i,:] - PROTO[j,:];
distance[i,j] = np.dot(C,C);
# compute the criterion and the class membership
critere = 0;
for i in range(N) :
di = distance[i,:];
minligne = min(di)
CLASSE[i] = np.argmin(di) # !!! numbered from 0; +1 is applied at the end if needed
critere = critere + minligne;
# positions of the new prototypes
for i in range(k) :
Ic = np.where(CLASSE==i);
if np.size(Ic) > 0 :
PROTO[i,:] = np.mean(X[Ic,:],1);
# display
print("% .10f % .10f" % (critere,critere/N));
for i in range(k):
plt.plot([prevprot[i,a], PROTO[i,a]], [prevprot[i,o], PROTO[i,o]],\
"o-",linewidth=3,color=Tcol[i,:],markersize=markersize);
#plt.plot(PROTO[i,a], PROTO[i,o],"o-");
prevprot = PROTO.copy();
time.sleep(spause);
plt.draw();
#
# end of the while loop
#--------------------------------------------------------------
for i in range(k) :
Ic = np.where(CLASSE==i);
plt.plot(X[Ic,a],X[Ic,o],"*",color=Tcol[i,:],markersize=markersize);
plt.plot(PROTO[i,a], PROTO[i,o],"s",color=[0,0,0]);
plt.text(PROTO[i,a], PROTO[i,o],str(i+1), fontsize=fontsize);
plt.axis("tight");
plt.xlabel("x%d" %(a+1));
plt.ylabel("x%d" %(o+1));
plt.title("Kmeans algorithme on data with k=%d" % (k));
#plt.ioff();
#
#--------------------------------------------------------------
CLASSE = CLASSE + 1 # return class numbers counted from 1
return PROTO, CLASSE
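# Hypothetical usage sketch (data made up): cluster 300 2-D points into 4 groups
# without slowing the animation down.
# X = np.vstack([np.random.randn(75, 2) + c for c in ([0, 0], [5, 0], [0, 5], [5, 5])])
# PROTO, CLASSE = kmoys(X, 4, spause=0)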
#============================ kclassif ============================
def kclassif (X,proto,clasprot=None,opt=0) :
'''KLASS = kclassif (X,proto,clasprot,opt)
| Assigns to each element of X the class of the nearest prototype
| (in the Euclidean sense).
| Inputs:
| X : the data set to classify
| proto : the prototypes with which the data are to be associated
| according to their proximity
| clasprot : classes of the prototypes. The data associated with a
| prototype by proximity inherit (are assigned) the class of
| that prototype. If this parameter is not given, the prototypes
| are assigned a class automatically, in order, starting from 1.
| opt : whether or not prototypes associated with a null class (=0) are
| taken into account. A prototype that captured no data may
| indeed have a class value of 0 in clasprot.
| if opt = 0 : a data point is assigned the class of its nearest
| prototype even if that class is 0 (this is the
QgsMapCanvas (see setLayers)
Parameters
----------
layername
name of the layer, generally a file path or vsimem path
"""
if layername in self.layer_manager.shown_layer_names:
self.layer_manager.hide_layer(layername)
self.canvas.setLayers(self.layer_manager.shown_layers)
def _manager_remove_layer(self, layername):
"""
Remove the layer from the layer manager and remove the layer from the shown layers in the QgsMapCanvas (see setLayers)
Parameters
----------
layername
name of the layer, generally a file path or vsimem path
"""
if layername in self.layer_manager.layer_data_lookup:
self.layer_manager.remove_layer(layername)
self.canvas.setLayers(self.layer_manager.shown_layers)
# if the layer is a virtual file system object, unlink the layer to prevent mem leaks
if layername[0:7] == r'/vsimem':
gdal.Unlink(layername)
def build_line_source(self, linename: str):
"""
Build the vsimem path for the multibeam line provided
Parameters
----------
linename
name of the multibeam file
Returns
-------
str
generated vsimem path for the line
"""
return '/vsimem/{}.shp'.format(linename)
def build_surface_source(self, surfname: str, lyrname: str):
"""
Build the vsimem path for the surface/layer provided
Parameters
----------
surfname
path to the surface
lyrname
name of the surface layer you want to show
Returns
-------
str
generated vsimem path for the surface/layer
"""
newname = '{}_{}.tif'.format(os.path.splitext(surfname)[0], lyrname)
source = '/vsimem/{}'.format(newname)
return source
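# Illustrative note (not in the original source): for a hypothetical surface path
# 'C:/data/grid' and layer 'depth', this yields '/vsimem/C:/data/grid_depth.tif',
# i.e. an in-memory GDAL raster keyed by surface path and band layer name.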
def set_background(self, layername: str, transparency: float, surf_transparency: float):
"""
Set the background layer(s) based on the provided layername. See the various _init_* methods for details on how these
background layer(s) are constructed.
Parameters
----------
layername
one of 'Default', 'OpenStreetMap (internet required)', etc.
transparency
the transparency of the layer as a percentage
surf_transparency
the transparency of all surfaces as a percentage
"""
print('Initializing {} with transparency of {}%'.format(layername, int(transparency * 100)))
self.layer_background = layername
self.layer_transparency = transparency
self.surface_transparency = surf_transparency
if self.layer_background == 'None':
self._init_none()
elif self.layer_background == 'Default':
self._init_default_layers()
elif self.layer_background == 'VDatum Coverage (VDatum required)':
self._init_vdatum_extents()
elif self.layer_background == 'OpenStreetMap (internet required)':
self._init_openstreetmap()
elif self.layer_background == 'Satellite (internet required)':
self._init_satellite()
elif self.layer_background == 'NOAA RNC (internet required)':
self._init_noaa_rnc()
elif self.layer_background == 'NOAA ENC (internet required)':
self._init_noaa_enc()
elif self.layer_background == 'GEBCO Grid (internet required)':
self._init_gebco()
elif self.layer_background == 'EMODnet Bathymetry (internet required)':
self._init_emodnet()
for lyr in self.layer_manager.surface_layers:
lyr.renderer().setOpacity(1 - self.surface_transparency)
def set_extent(self, max_lat: float, min_lat: float, max_lon: float, min_lon: float, buffer: bool = True):
"""
Set the extent of the 2d window
Parameters
----------
max_lat
set the maximum latitude of the displayed map
min_lat
set the minimum latitude of the displayed map
max_lon
set the maximum longitude of the displayed map
min_lon
set the minimum longitude of the displayed map
buffer
if True, will extend the extents by half the current width/height
"""
if buffer:
lat_buffer = np.max([(max_lat - min_lat) * 0.5, 0.5])
lon_buffer = np.max([(max_lon - min_lon) * 0.5, 0.5])
else:
lat_buffer = 0
lon_buffer = 0
min_lon = np.clip(min_lon - lon_buffer, -179.999999999, 179.999999999)
max_lon = np.clip(max_lon + lon_buffer, -179.999999999, 179.999999999)
min_lat = np.clip(min_lat - lat_buffer, -90, 90)
max_lat = np.clip(max_lat + lat_buffer, -90, 90)
self.canvas.setExtent(qgis_core.QgsRectangle(qgis_core.QgsPointXY(min_lon, min_lat),
qgis_core.QgsPointXY(max_lon, max_lat)))
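# Hypothetical usage sketch (instance name and coordinates made up): frame a
# survey area with the default half-extent buffer applied.
# canvas_widget.set_extent(max_lat=43.2, min_lat=42.8, max_lon=-70.1, min_lon=-70.6)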
def add_line(self, line_name: str, lats: np.ndarray, lons: np.ndarray, refresh: bool = False):
"""
Draw a new multibeam trackline on the mapcanvas, unless it is already there
Parameters
----------
line_name
name of the multibeam line
lats
numpy array of latitude values to plot
lons
numpy array of longitude values to plot
refresh
set to True if you want to draw the line immediately after adding it here; Kluster itself redraws the
screen after adding lines
"""
source = self.build_line_source(line_name)
if ogr_output_file_exists(source):
# raise ValueError('Line {} already exists in this map view session'.format(line_name))
return
vl = VectorLayer(source, 'ESRI Shapefile', self.epsg, False)
vl.write_to_layer(line_name, np.stack([lons, lats], axis=1), 2) # ogr.wkbLineString
vl.close()
lyr = self.add_layer(source, line_name, 'ogr', QtGui.QColor('blue'), layertype='line')
if refresh:
lyr.reload()
def remove_line(self, line_name: str, refresh: bool = False):
"""
Remove a multibeam line from the mapcanvas
Parameters
----------
line_name
name of the multibeam line
refresh
optional screen refresh, True most of the time, unless you want to remove multiple lines and then refresh
at the end
"""
source = self.build_line_source(line_name)
remlyr = ogr_output_file_exists(source)
if remlyr:
self.remove_layer(source)
if refresh:
self.layer_by_name(source).reload()
def hide_line(self, line_name: str, refresh: bool = False):
"""
Hide the line so that it is not displayed, but keep the data in the layer_manager for showing later
Parameters
----------
line_name
name of the multibeam line
refresh
optional screen refresh, True most of the time, unless you want to remove multiple lines and then refresh
at the end
"""
source = self.build_line_source(line_name)
hidelyr = ogr_output_file_exists(source)
if hidelyr:
self.hide_layer(source)
if refresh:
self.layer_by_name(source).reload()
def show_line(self, line_name, refresh=False):
"""
Show the line so that it is displayed, if it was hidden
Parameters
----------
line_name
name of the multibeam line
refresh
optional screen refresh, True most of the time, unless you want to remove multiple lines and then refresh
at the end
"""
source = self.build_line_source(line_name)
showlyr = ogr_output_file_exists(source)
if showlyr:
self.show_layer(source)
if refresh:
self.layer_by_name(source).reload()
def add_surface(self, surfname: str, lyrname: str, data: list, geo_transform: list, crs: Union[CRS, int]):
"""
Add a new surface/layer with the provided data
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
data
list of either [2d array of depth] or [2d array of depth, 2d array of vert uncertainty]
geo_transform
[x origin, x pixel size, x rotation, y origin, y rotation, -y pixel size]
crs
pyproj CRS or an integer epsg code
"""
source = self.build_surface_source(surfname, lyrname)
showlyr = gdal_output_file_exists(source)
if not showlyr:
gdal_raster_create(source, data, geo_transform, crs, np.nan, (lyrname,))
self.add_layer(source, lyrname, 'gdal', layertype='surface')
else:
self.show_surface(surfname, lyrname)
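# geo_transform example (hypothetical values, standard GDAL convention):
# [400000.0, 8.0, 0.0, 4900000.0, 0.0, -8.0] places the raster origin at
# x=400000, y=4900000 with 8-unit pixels; the negative final entry means
# rows advance downward (north-up raster).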
def hide_surface(self, surfname: str, lyrname: str):
"""
Hide the surface layer that corresponds to the given names.
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
source = self.build_surface_source(surfname, lyrname)
hidelyr = gdal_output_file_exists(source)
if hidelyr:
self.hide_layer(source)
def show_surface(self, surfname: str, lyrname: str):
"""
Show the surface layer that corresponds to the given names, if it was hidden
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
source = self.build_surface_source(surfname, lyrname)
showlyr = gdal_output_file_exists(source)
if showlyr:
self.show_layer(source)
def remove_surface(self, surfname: str):
"""
Remove a surface from the mapcanvas/layer_manager
Parameters
----------
surfname
path to the surface that is used as a name
"""
possible_layers = ['depth', 'vertical_uncertainty']
for lyr in possible_layers:
source = self.build_surface_source(surfname, lyr)
remlyr = gdal_output_file_exists(source)
if remlyr:
self.remove_layer(source)
def layer_point_to_map_point(self, layer: Union[qgis_core.QgsRasterLayer, qgis_core.QgsVectorLayer],
point: qgis_core.QgsPoint):
"""
Transform the provided point in layer coordinates to map coordinates. Layer is provided to get the CRS for
the transformation
Parameters
----------
layer
layer the point comes from
point
the point to transform
Returns
-------
qgis_core.QgsPoint
the transformed point
"""
crs_src = layer.crs()
crs_dest = self.crs
transform_context = self.project.transformContext()
xform = qgis_core.QgsCoordinateTransform(crs_src, crs_dest, transform_context)
newpoint = xform.transform(point)
return newpoint
def layer_extents_to_map_extents(self, layer: Union[qgis_core.QgsRasterLayer, qgis_core.QgsVectorLayer]):
"""
Transform the provided layer's extents to the map extents, and return the extents
Parameters
----------
layer
layer the extents come from
Returns
-------
qgis_core.QgsRectangle
the transformed extents
"""
extnt = layer.extent()
newmin = self.layer_point_to_map_point(layer, qgis_core.QgsPointXY(extnt.xMinimum(), extnt.yMinimum()))
newmax = self.layer_point_to_map_point(layer, qgis_core.QgsPointXY(extnt.xMaximum(), extnt.yMaximum()))
extnt = qgis_core.QgsRectangle(newmin, newmax)
return extnt
def map_point_to_layer_point(self, layer: Union[qgis_core.QgsRasterLayer, qgis_core.QgsVectorLayer],
point: qgis_core.QgsPoint):
"""
Transform the provided point in map coordinates to layer coordinates. Layer is provided to get the CRS for
the transformation
Parameters
----------
layer
layer the point comes from
point
the point to transform
Returns
-------
qgis_core.QgsPoint
the transformed point
"""
crs_src = self.crs
crs_dest = layer.crs()
transform_context = self.project.transformContext()
xform = qgis_core.QgsCoordinateTransform(crs_src, crs_dest, transform_context)
newpoint = xform.transform(point)
return newpoint
def add_layer(self, source: str, layername: str, providertype: str, color: QtGui.QColor = None,
layertype: str = 'background'):
"""
Generate the Qgs layer. provider type specifies the driver to use to open the data.
Parameters
----------
source
source str, generally a file path to the object/file
layername
layer name to use from the source data
providertype
one of ['gdal', 'wms', 'ogr']
color
optional, only used for vector layers, will set the color of that layer to the provided
layertype
corresponding to the category of layer, one of 'background', 'line', 'surface'
from random import randint
from random import choice
from pprint import pprint
import logging, operator
from functools import reduce
import cv2 # pytype:disable=import-error
from PIL import Image, ImageDraw
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction import DictVectorizer
from tutorenvs.utils import OnlineDictVectorizer
import numpy as np
from colorama import Back, Fore
from tutorenvs.utils import DataShopLogger
from tutorenvs.utils import StubLogger
from tutorenvs.fsm import StateMachine
pil_logger = logging.getLogger('PIL')
pil_logger.setLevel(logging.INFO)
def same_denoms(denoms):
return len(set(denoms)) == 1
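# e.g. same_denoms(['4', '4', '4']) -> True, same_denoms(['3', '4']) -> False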
class FractionArithSymbolic:
def __init__(self, logger=None, n=2):
"""
Creates a state and sets a random problem.
"""
if logger is None:
self.logger = DataShopLogger('FractionsTutor', extra_kcs=['field'])
# self.logger = StubLogger()
else:
self.logger = logger
if n < 2:
raise Exception("n cannot be lower than 2.")
self.n = n
self.logger.set_student()
self.set_random_problem()
def create_state_machine(self):
fsm = StateMachine()
init_nums = [int(self.state['initial_num_{}'.format(i)]) for i in range(self.n)]
init_denoms = [int(self.state['initial_denom_{}'.format(i)]) for i in range(self.n)]
sd = same_denoms(init_denoms)
# TODO: Make the order insignificant?
if self.state['initial_operator'] == "*":
# Multiplication
foci = ["initial_denom_{}".format(i) for i in range(self.n)]
sai = ('answer_denom', 'UpdateField',
{'value': str(reduce(operator.mul, init_denoms))})
fsm.add_next_state(sai, foci)
foci = ["initial_num_{}".format(i) for i in range(self.n)]
sai = ('answer_num', 'UpdateField',
{'value': str(reduce(operator.mul, init_nums))})
fsm.add_next_state(sai, foci)
elif sd:
# Addition Same
foci = ["initial_denom_0"]
sai = ('answer_denom', 'UpdateField',
{'value': str(self.state['initial_denom_0'])})
fsm.add_next_state(sai, foci)
foci = ["initial_num_{}".format(i) for i in range(self.n)]
sai = ('answer_num', 'UpdateField',
{'value': str(sum(init_nums))})
fsm.add_next_state(sai, foci)
else:
# Addition Different
foci = []#["initial_denom_{}".format(i) for i in range(self.n)]
sai = ('check_convert', 'UpdateField', {'value': 'x'})
fsm.add_next_state(sai, foci)
convert_denom = reduce(operator.mul, init_denoms)
for i in range(self.n):
if(i == 0):
foci = ["initial_denom_{}".format(i) for i in range(self.n)]
else:
foci = [f"convert_denom_{i-1}"]
sai = ('convert_denom_{}'.format(i), 'UpdateField', {'value': str(convert_denom)})
fsm.add_next_state(sai, foci)
convert_nums = []
for i in range(self.n):
# foci = [*["initial_denom_{}".format(j) for j in range(self.n)],
# "initial_num_{}".format(i)]
# foci.remove("initial_denom_{}".format(i))
foci = [f"convert_denom_{i}", f"initial_num_{i}", f"initial_denom_{i}"]
convert_num = int((convert_denom * init_nums[i]) / init_denoms[i])
sai = ('convert_num_{}'.format(i), 'UpdateField', {'value': str(convert_num)})
fsm.add_next_state(sai, foci)
convert_nums.append(convert_num)
foci = ["convert_num_{}".format(i) for i in range(self.n)]
sai = ('answer_num', 'UpdateField', {'value': str(sum(convert_nums))})
fsm.add_next_state(sai, foci)
foci = [f"convert_denom_{self.n-1}"]
sai = ('answer_denom', 'UpdateField', {'value': str(convert_denom)})
fsm.add_next_state(sai, foci)
foci = ['answer_num', 'answer_denom']
sai = ('done', "ButtonPressed", {'value': -1})
fsm.add_next_state(sai, foci)
fsm.reset()
return fsm
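# Example trace of the FSM above for the hypothetical problem 1/2 + 1/3
# (n=2, different denominators): the expected SAI sequence is
# check_convert='x', convert_denom_0='6', convert_denom_1='6' (2*3),
# convert_num_0='3' (6*1/2), convert_num_1='2' (6*1/3),
# answer_num='5', answer_denom='6', and finally the done button.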
def reset(self, nums, denoms, operator):
"""
Sets the state to a new fraction arithmetic problem as specified by the
provided arguments.
"""
self.steps = 0
self.num_correct_steps = 0
self.num_incorrect_steps = 0
self.num_hints = 0
self.state = {'check_convert': '',
'answer_num': '',
'answer_denom': '',
'initial_operator': operator,
'convert_operator': operator}
for i in range(self.n):
self.state.update({'initial_num_{}'.format(i): str(nums[i]),
'initial_denom_{}'.format(i): str(denoms[i]),
'convert_num_{}'.format(i): '',
'convert_denom_{}'.format(i): ''})
self.fsm = self.create_state_machine()
def get_possible_selections(self):
sels = ['check_convert',
'answer_num',
'answer_denom',
'done']
for i in range(self.n):
sels.extend(['convert_num_{}'.format(i),
'convert_denom_{}'.format(i)])
return sels
def get_possible_args(self):
args = ['check_convert',
'answer_num',
'answer_denom']
for i in range(self.n):
args.extend(['initial_num_{}'.format(i),
'initial_denom_{}'.format(i),
'convert_num_{}'.format(i),
'convert_denom_{}'.format(i)])
return args
# TODO
def render(self, add_dot=None):
img = self.get_image(add_counts=True, add_dot=add_dot)
cv2.imshow('vecenv', np.array(img))
cv2.waitKey(1)
# TODO
def get_image(self, add_counts=False, add_dot=None):
# note: state keys are indexed (see reset()), so the two-fraction layout reads the *_0 and *_1 fields
output = "{:>3} {:>3}\n---- {} ---- =\n{:>3} {:>3}\n\nConvert? | {} |\n\n{:>3} {:>3} {:>3}\n---- {} ---- = ----\n{:>3} {:>3} {:>3}\n".format(self.state['initial_num_0'],
self.state['initial_num_1'],
self.state['initial_operator'],
self.state['initial_denom_0'],
self.state['initial_denom_1'],
self.state['check_convert'],
self.state['convert_num_0'],
self.state['convert_num_1'],
self.state['answer_num'],
self.state['convert_operator'],
self.state['convert_denom_0'],
self.state['convert_denom_1'],
self.state['answer_denom'])
img = Image.new('RGB', (125, 150), color="white")
d = ImageDraw.Draw(img)
d.text((10, 10), output, fill='black')
# Draw input fields
# ones
# if state['answer_ones'] == " ":
# d.rectangle(((34, 71), (38, 79)), fill=None, outline='black')
# append correct/incorrect counts
if add_counts:
d.text((95, 0), "h:{}".format(self.num_hints), fill=(0,0,0))
d.text((95, 10), "-:{}".format(self.num_incorrect_steps), fill=(0,0,0))
d.text((95, 20), "+:{}".format(self.num_correct_steps), fill=(0,0,0))
# for eyes :)
# if add_dot:
# d.ellipse((add_dot[0]-3, add_dot[1]-3, add_dot[0]+3, add_dot[1]+3),
# fill=None, outline='blue')
return img
def _get_coord(self, name):
if name == "answer_num":
x = (self.n * 100) + 100
y = 200
elif name == "answer_denom":
x = (self.n * 100) + 100
y = 300
elif name == "initial_operator":
# x = (self.n * 100)
# y = 50
x = 0
y = 700
elif name == "convert_operator":
# x = (self.n * 100)
# y = 150
x = 0
y = 600
elif name == "check_convert":
# x = (self.n * 100) + 200
x = 0
y = 500
elif name == "done":
x = 0
y = 300
else:
t, n, idx = name.split("_")
c1 = 0 if t == "initial" else 200
c2 = 0 if n == "num" else 100
y = c1 + c2
x = int(idx) * 100
return {"x": x,
"y": y,
"width": 100,
"height": 100}
def get_state(self):
"""
Returns the current state as a dict.
"""
state_output = {attr:
{'id': attr, 'value': self.state[attr],
'type': 'TextField',
'contentEditable': self.state[attr] == "",
'dom_class': 'CTATTable--cell',
'above': '',
'below': '',
'to_left': '',
'to_right': '',
**self._get_coord(attr)
}
for attr in self.state}
state_output['done'] = {
'id': 'done',
'type': 'Component',
'dom_class': 'CTATDoneButton',
'above': '',
'below': '',
'to_left': '',
'to_right': '',
**self._get_coord('done')
}
return state_output
def set_random_problem(self):
typ = choice(["AD", "AS", "M"])
if typ == "AD":
ok = False
while(not ok):
nums = [str(randint(1, 15)) for _ in range(self.n)]
denoms = [str(randint(2, 15)) for _ in range(self.n)]
ok = (not any(np.array(nums) == np.array(denoms))) and (len(set(denoms)) > 1)
operator = "+"
elif typ == "AS":
denom = str(randint(2, 15))
nums = [str(randint(1, 15)) for _ in range(self.n)]
denoms = [denom for _ in range(self.n)]
operator = "+"
else: # M
nums = [str(randint(1, 15)) for _ in range(self.n)]
denoms = [str(randint(2, 15)) for _ in range(self.n)]
operator = "*"
self.set_problem(nums, denoms, operator)
print(Back.WHITE + Fore.BLACK + f"STARTING PROBLEM {operator.join([f'({n}/{v})' for n, v in zip(nums, denoms)])}")
def set_problem(self, nums, denoms, operator):
self.reset(nums, denoms, operator)
problem_name = "{}_{}".format(nums[0], denoms[0])
for n, d in zip(nums[1:], denoms[1:]):
problem_name += "{}_{}_{}".format(operator, n, d)
self.logger.set_problem(problem_name)
sd = same_denoms(denoms)
if operator == "+" and sd:
self.ptype = 'AS'
elif operator == "+" and not sd:
self.ptype = 'AD'
else:
self.ptype = 'M'
def apply_sai(self, selection, action, inputs):
"""
Give a SAI, it applies it. This method returns feedback
(i.e., -1 or 1).
"""
self.steps += 1
reward = self.fsm.apply(selection, action, inputs)
if reward > 0:
outcome = "CORRECT"
self.num_correct_steps += 1
else:
outcome = "INCORRECT"
self.num_incorrect_steps += 1
self.logger.log_step(selection, action, inputs['value'], outcome,
step_name=self.ptype + '_' + selection,
kcs=[self.ptype + '_' + selection])
# Render output?
# self.render()
if reward == -1.0:
return reward
if selection == "done":
self.set_random_problem()
elif reward > 0:
self.state[selection] = inputs['value']
return reward
def request_demo(self, return_foci=False):
demo = self.get_demo(return_foci)
sai, foci = demo if(return_foci) else (demo, None)
feedback_text = "selection: %s, action: %s, input: %s" % (sai[0],
sai[1], sai[2]['value'])
self.logger.log_hint(feedback_text, step_name=self.ptype + '_' +
sai[0], kcs=[self.ptype + '_' + sai[0]])
self.num_hints += 1
return demo
def get_demo(self, return_foci=False):
"""
Returns a correct next-step SAI
"""
sai = self.fsm.cur_state.sai
foci = self.fsm.cur_state.foci
if(return_foci):
return sai, foci
else:
return sai
class FractionArithNumberEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.tutor = FractionArithSymbolic()
n_selections = len(self.tutor.get_possible_selections())
n_features = 900
self.dv = OnlineDictVectorizer(n_features)
self.observation_space = spaces.Box(
low=0.0, high=1.0, shape=(1, n_features), dtype=np.float32)
self.action_space = spaces.MultiDiscrete([n_selections, 450])
self.n_steps = 0
self.max_steps = 100000
def step(self, action):
self.n_steps += 1
s, a, i = self.decode(action)
# print("STEP", s, a, i)
# print()
reward = self.tutor.apply_sai(s, a, i)
# self.render()
# print(reward)
state = self.tutor.state
# pprint(state)
obs = self.dv.fit_transform([state])[0]
done = (s == 'done' and reward == 1.0)
# have a max steps for a given problem.
# When we hit that we're done regardless.
if self.n_steps > self.max_steps:
done = True
info = {}
return obs, reward, done, info
def encode(self, sai):
s,a,i = sai
out = np.zeros(1,dtype=np.int64)
enc_s = self.tutor.get_possible_selections().index(s)
if(s == 'done' or s == "check_convert"):
v = 0
else:
v = int(i['value']) - 1
# n = len(self.tutor.get_possible_selections())
out[0] = 450 * enc_s + int(v)
return out
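# Worked encoding example (grounded in the scheme above): 'answer_num' is at
# index 1 of get_possible_selections(), so the SAI
# ('answer_num', 'UpdateField', {'value': '5'}) encodes to 450 * 1 + (5 - 1) = 454.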
def request_demo_encoded(self):
action = self.tutor.request_demo()
# print("DEMO ACTION:", action)
return self.encode(action)
def decode(self, action):
# print(action)
s = self.tutor.get_possible_selections()[action[0]]
if s == "done":
a = "ButtonPressed"
else:
a = "UpdateField"
if s == "done":
v = -1
if s == "check_convert":
v = "x"
else:
v = action[1] + 1
i = {'value': str(v)}
return s, a, i
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
self.assertFalse(
self.queue._r.exists('%s:%s:active' % (
self.queue._key_prefix, self._test_queue_type)))
def test_requeue_queue_type_ready_set(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
queue_type_ready_set = self.queue._r.smembers(
'%s:ready:queue_type' % self.queue._key_prefix)
self.assertEqual(len(queue_type_ready_set), 1)
self.assertEqual(queue_type_ready_set.pop(), self._test_queue_type)
def test_requeue_queue_type_active_set(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
queue_type_active_set = self.queue._r.smembers(
'%s:active:queue_type' % self.queue._key_prefix)
self.assertEqual(len(queue_type_active_set), 0)
def test_requeue_requeue_limit_5(self):
# with requeue limit as 5
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
requeue_limit=self._test_requeue_limit_5
)
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_5)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_5 - 1)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_5 - 2)
def test_requeue_requeue_limit_0(self):
# with requeue limit as 0
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
requeue_limit=self._test_requeue_limit_0
)
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_0)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(response['status'], 'failure')
def test_requeue_requeue_limit_neg_1(self):
# with requeue limit as -1 (requeue infinitely)
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
requeue_limit=self._test_requeue_limit_neg_1
)
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_neg_1)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_neg_1)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_neg_1)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
# dequeue from the queue_type
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_neg_1)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
# requeue the job
self.queue.requeue()
self.assertEqual(
response['requeues_remaining'], self._test_requeue_limit_neg_1)
# wait until the job expires
time.sleep(self.queue._job_expire_interval / 1000.00)
def test_interval_non_existent_queue(self):
response = self.queue.interval(
interval=1000,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type
)
self.assertEqual(response['status'], 'failure')
interval_map_name = '%s:interval' % (self.queue._key_prefix)
# check if interval map exists
self.assertFalse(self.queue._r.exists(interval_map_name))
def test_interval_existent_queue(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if interval is saved in the appropriate structure
interval_map_name = '%s:interval' % (self.queue._key_prefix)
# check if interval map exists
self.assertTrue(self.queue._r.exists(interval_map_name))
# check the value
interval_map_key = '%s:%s' % (
self._test_queue_type, self._test_queue_id)
interval = self.queue._r.hget(interval_map_name, interval_map_key)
self.assertEqual(interval, '10000')
# set the interval to 5s (5000ms)
response = self.queue.interval(
interval=5000,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type
)
self.assertEqual(response['status'], 'success')
# check if interval is saved in the appropriate structure
interval_map_name = '%s:interval' % (self.queue._key_prefix)
# check if interval map exists
self.assertTrue(self.queue._r.exists(interval_map_name))
# check the value
interval_map_key = '%s:%s' % (
self._test_queue_type, self._test_queue_id)
interval = self.queue._r.hget(interval_map_name, interval_map_key)
self.assertEqual(interval, '5000')
def test_metrics_response_status(self):
response = self.queue.metrics()
self.assertEqual(response['status'], 'success')
response = self.queue.metrics(self._test_queue_type)
self.assertEqual(response['status'], 'success')
response = self.queue.metrics(
self._test_queue_type, self._test_queue_id)
self.assertEqual(response['status'], 'success')
def test_metrics_response_queue_types(self):
response = self.queue.metrics()
self.assertEqual(response['queue_types'], [])
self.assertEqual(len(response['enqueue_counts'].values()), 10)
self.assertEqual(sum(response['enqueue_counts'].values()), 0)
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 0)
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics()
self.assertEqual(response['queue_types'], [self._test_queue_type])
self.assertEqual(len(response['enqueue_counts'].values()), 10)
self.assertEqual(sum(response['enqueue_counts'].values()), 1)
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 0)
response = self.queue.dequeue(queue_type=self._test_queue_type)
response = self.queue.metrics()
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 1)
def test_metrics_response_queue_ids(self):
response = self.queue.metrics(queue_type=self._test_queue_type)
self.assertEqual(response['queue_ids'], [])
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics(queue_type=self._test_queue_type)
self.assertEqual(response['queue_ids'], [self._test_queue_id])
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
response = self.queue.metrics(queue_type=self._test_queue_type)
self.assertEqual(response['queue_ids'], [self._test_queue_id])
def test_metrics_response_enqueue_counts_list(self):
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(len(response['enqueue_counts'].values()), 10)
self.assertEqual(sum(response['enqueue_counts'].values()), 0)
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(len(response['enqueue_counts'].values()), 10)
self.assertEqual(sum(response['enqueue_counts'].values()), 1)
def test_metrics_response_dequeue_counts_list(self):
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 0)
response = self.queue.dequeue(queue_type=self._test_queue_type)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 0)
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.dequeue(queue_type=self._test_queue_type)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 1)
def test_metrics_response_queue_length(self):
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(response['queue_length'], 0)
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(response['queue_length'], 1)
response = self.queue.dequeue(queue_type=self._test_queue_type)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
self.assertEqual(response['queue_length'], 0)
def test_metrics_enqueue_sliding_window(self):
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(len(response['enqueue_counts'].values()), 10)
self.assertEqual(sum(response['enqueue_counts'].values()), 0)
self.assertEqual(len(global_response['enqueue_counts'].values()), 10)
self.assertEqual(sum(global_response['enqueue_counts'].values()), 0)
# enqueue a job
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
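# e.g. a timestamp of 1700000065123 ms floors to
# int(1700000065123 / 60000) * 60000 = 1700000040000, the start of its minute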
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 1)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 1)
# enqueue another job
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 2)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 2)
# wait for one minute
time.sleep(65) # 65 seconds
# check the last minute value.
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 2)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 2)
# save the old value before overwriting
old_1_timestamp_minute = timestamp_minute
# check the current minute value
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 0)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 0)
# enqueue a job in the current minute
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 1)
self.assertEqual(response['enqueue_counts'][old_1_timestamp_minute], 2)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 1)
self.assertEqual(
global_response['enqueue_counts'][old_1_timestamp_minute], 2)
time.sleep(65) # sleep for another 65s
# save the old timestamp
old_2_timestamp_minute = timestamp_minute
# check the current minute value
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 0)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 0)
# enqueue a job in the current minute
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['enqueue_counts'][timestamp_minute], 1)
self.assertEqual(response['enqueue_counts'][old_1_timestamp_minute], 2)
self.assertEqual(response['enqueue_counts'][old_2_timestamp_minute], 1)
self.assertEqual(
global_response['enqueue_counts'][timestamp_minute], 1)
self.assertEqual(
global_response['enqueue_counts'][old_1_timestamp_minute], 2)
self.assertEqual(
global_response['enqueue_counts'][old_2_timestamp_minute], 1)
def test_metrics_dequeue_sliding_window(self):
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(len(response['dequeue_counts'].values()), 10)
self.assertEqual(sum(response['dequeue_counts'].values()), 0)
self.assertEqual(len(global_response['dequeue_counts'].values()), 10)
self.assertEqual(sum(global_response['dequeue_counts'].values()), 0)
# enqueue a job
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=100, # 100ms
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['dequeue_counts'][timestamp_minute], 1)
self.assertEqual(
global_response['dequeue_counts'][timestamp_minute], 1)
# enqueue another job
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=100, # 100ms
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
time.sleep(0.1) # 100ms
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['dequeue_counts'][timestamp_minute], 2)
self.assertEqual(
global_response['dequeue_counts'][timestamp_minute], 2)
# wait for one minute
time.sleep(65) # 65 seconds
# check the last minute value.
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
global_response = self.queue.metrics()
self.assertEqual(response['dequeue_counts'][timestamp_minute], 2)
self.assertEqual(
global_response['dequeue_counts'][timestamp_minute], 2)
# save the old value before overwriting
old_1_timestamp_minute = timestamp_minute
# check the current minute value
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
response = self.queue.metrics(
queue_type=self._test_queue_type, queue_id=self._test_queue_id)
:type role: :class:`Roles`
:returns: role info
:rtype: :class:`~c4.system.configuration.RoleInfo`
"""
key = "/roles/{role}".format(role=role.name)
value = self.store.get(key)
if value:
return deserialize(value)
return None
def getRoles(self):
"""
Get a mapping of roles to role info objects
:returns: mappings
:rtype: dict
"""
rolesPrefix = "/roles/"
# note that key is the role name and value is the role info
return {
key.replace(rolesPrefix, ""): deserialize(value)
for key, value in self.store.getPrefix(rolesPrefix)
}
def getTargetState(self, node, name=None):
"""
Get the target state of a node or device manager.
:param node: node
:type node: str
:param name: device manager name
:type name: str
:returns: :class:`~c4.system.configuration.States`
"""
state = self.getProperty(node, name, "targetState")
if state is None:
return None
return States.valueOf(state)
def getNode(self, node, includeDevices=True, flatDeviceHierarchy=False):
"""
Get node information for the specified system manager
:param node: node
:type node: str
:param includeDevices: include devices for the node
:type includeDevices: bool
:param flatDeviceHierarchy: flatten device hierarchy
:type flatDeviceHierarchy: bool
:returns: node
:rtype: :class:`~c4.system.configuration.NodeInfo`
"""
try:
if includeDevices:
if BasicVersion(sqlite3.sqlite_version) < SqliteCTEMinimumVersion:
# For versions of sqlite without common table expressions it is necessary to
# emulate a hierarchical query
# Start from the system manager
frontier = self.database.query("""
select id, 0, name, state, type, details, parent_id
from t_sm_configuration
where parent_id is null and name is ?""", (node,))
if len(frontier) == 0:
raise Exception("Invalid name for system manager")
rows = []
while len(frontier) > 0:
visiting = frontier.pop(0)
rows.append(visiting)
# Add the children of current device
frontier.extend(self.database.query("""
select t.id as id,
? as level,
? || "." || t.name as name,
t.state as state,
t.type as type,
t.details as details,
t.parent_id as parent_id
from t_sm_configuration as t
where parent_id = ?""", (visiting[1]+1, visiting["name"], visiting["id"])))
else:
rows = self.database.query("""
with recursive
configuration(id, level, name, state, type, details, parent_id) as (
select id, 0, name, state, type, details, parent_id
from t_sm_configuration
where parent_id is null and name is ?
union all
select t.id, configuration.level+1, configuration.name || "." || t.name, t.state, t.type, t.details, t.parent_id
from t_sm_configuration as t join configuration on t.parent_id=configuration.id
order by 2 desc
)
select * from configuration;""", (node,))
else:
rows = self.database.query("""
select * from t_sm_configuration
where parent_id is null and name is ?""", (node,))
if not rows:
return None
# deal with node information
nodeRow = rows.pop(0)
nodeDetailsJSON = nodeRow["details"]
nodeProperties = json.loads(nodeDetailsJSON)
nodeRole = Roles.valueOf(nodeProperties.pop("role"))
nodeState = States.valueOf(nodeRow["state"])
nodeInfo = NodeInfo(nodeRow["name"], nodeProperties["address"], role=nodeRole, state=nodeState)
nodeInfo._id = nodeRow["id"]
nodeInfo.properties = nodeProperties
if rows:
if not flatDeviceHierarchy:
root = NodeInfo("root", None)
root.devices[nodeRow["name"]] = nodeInfo
for row in rows:
# split fully qualified name into path and name
currentPath = row["name"].split(".")
detailsJSON = row["details"]
properties = json.loads(detailsJSON)
if flatDeviceHierarchy:
# strip node name from device name
currentPath.pop(0)
deviceName = ".".join(currentPath)
# create device information
deviceInfo = DeviceInfo(deviceName, row["type"], state=States.valueOf(row["state"]))
deviceInfo._id = row["id"]
deviceInfo._parentId = row["parent_id"]
deviceInfo.properties = properties
nodeInfo.devices[deviceName] = deviceInfo
else:
# create device information
name = currentPath.pop()
deviceInfo = DeviceInfo(name, row["type"], state=States.valueOf(row["state"]))
deviceInfo._id = row["id"]
deviceInfo._parentId = row["parent_id"]
deviceInfo.properties = properties
# traverse path to parent
currentDeviceInfo = root
for pathElement in currentPath:
currentDeviceInfo = currentDeviceInfo.devices[pathElement]
currentDeviceInfo.addDevice(deviceInfo)
return nodeInfo
except Exception as e:
import traceback
self.log.error(traceback.format_exc())
self.log.error("could not get node info for '%s': '%s'", node, e)
return None
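# Naming example (follows from the hierarchical queries above): a node 'node1'
# with a device 'db' that has a child 'instance0' produces rows named 'node1',
# 'node1.db' and 'node1.db.instance0'; with flatDeviceHierarchy=True the node
# prefix is stripped, leaving device keys 'db' and 'db.instance0'.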
def getNodeNames(self):
"""
Return a list of node names.
"""
rows = self.database.query("""
select name from t_sm_configuration
where parent_id is null""")
return [row["name"] for row in rows]
def refresh(self):
"""
Refresh information from backend
"""
#FIXME: not implemented, used to refresh cached values for etcd
pass
def removeDevice(self, node, fullDeviceName):
"""
Remove a device from the configuration
:param node: node name
:type node: str
:param fullDeviceName: fully qualified device name
:type fullDeviceName: str
"""
devices = self.getDevices(node, flatDeviceHierarchy=True)
# get matching device and its children
rowIds = sorted([(device._id,) for device in devices.values() if device.name.startswith(fullDeviceName)])
if rowIds:
self.database.writeMany("""
delete from t_sm_configuration where id is ?""",
*rowIds)
else:
self.log.error("could not remove '%s' from '%s' because it does not exist", fullDeviceName, node)
def removeNode(self, node):
"""
Remove node from the configuration
:param node: node name
:type node: str
"""
nodeInfo = self.getNode(node, flatDeviceHierarchy=True)
if nodeInfo is None:
self.log.error("could not remove '%s' because it does not exist", node)
return
rowIds = [(nodeInfo._id,)]
rowIds.extend([(device._id,) for device in nodeInfo.devices.values()])
rowIds = sorted(rowIds)
self.database.writeMany(
"""delete from t_sm_configuration where id is ?""",
*rowIds)
# remove aliases for node
self.database.writeCommit(
"""delete from t_sm_configuration_alias where node_name=?""",
(node,))
def removeProperty(self, node, name, propertyName):
"""
Remove a property from a system or device manager
:param node: node
:type node: str
:param name: device manager name
:type name: str
:param propertyName: property name
:type propertyName: str
"""
rowId, details = self._getDetails(node, name)
if propertyName in details:
del details[propertyName]
self.database.writeCommit("update t_sm_configuration set details = ? where id is ?", (json.dumps(details), rowId))
def removeRoleInfo(self, role):
"""
Remove role information
:param role: role
:type role: :class:`Roles`
"""
key = "/roles/{role}".format(role=role.name)
self.store.delete(key)
def resetDeviceStates(self):
"""
Sets the states of all devices to REGISTERED unless their state is
MAINTENANCE or UNDEPLOYED.
"""
self.database.writeCommit(
"""
update t_sm_configuration set state = ?
where parent_id is not null
and state is not 'MAINTENANCE'
and state is not 'REGISTERED'
and state is not 'UNDEPLOYED'""",
(States.REGISTERED.name,))
def resolveAlias(self, alias):
"""
Get node name for the specified alias
:param alias: alias
:type alias: str
:returns: node name
:rtype: str
"""
rows = self.database.query(
"""
select node_name from t_sm_configuration_alias
where alias is ?""",
(alias,))
if rows:
return rows[0]["node_name"]
return None
@ClassLogger
class SharedSqliteDBDeviceHistory(DeviceHistory):
"""
Shared SQLite database backend device manager history implementation
:param database: database manager
:type database: :class:`~DBManager`
"""
def __init__(self, database):
self.database = database
def add(self, node, name, status, ttl=None):
"""
Add status for device manager with specified name on specified node
:param node: node name
:type node: str
:param name: device manager name
:type name: str
:param status: status
:type status: :class:`DeviceManagerStatus`
:param ttl: time to live (in seconds), infinite by default
:type ttl: int
"""
if not isinstance(status, DeviceManagerStatus):
raise ValueError("'{0}' needs to be a '{1}'".format(status, DeviceManagerStatus))
if ttl is not None:
raise NotImplementedError("SQLite does not support time to live attributes")
timestamp = status.timestamp.toISOFormattedString()
serializedStatus = status.toJSON(includeClassInfo=True)
self.database.write("begin")
self.database.write("""
insert into history (node, name, timestamp, status) values (?, ?, ?, ?)""",
(node, name, timestamp, serializedStatus))
self.database.write("""
insert or replace into status (node, name, status) values (?, ?, ?)""",
(node, name, serializedStatus))
self.database.write("commit")
def get(self, node, name, limit=None):
"""
Get status history for device manager with specified name on specified node
:param node: node name
:type node: str
:param name: device manager name
:type name: str
:param limit: number of statuses to return
:type limit: int
:returns: list of history entries
:rtype: [:class:`Entry`]
"""
rows = self.database.query("""
select status from history
where node=? and name=?
order by timestamp desc
limit ?""",
(node, name, limit or -1))
entries = []
for row in rows:
status = JSONSerializable.fromJSON(row["status"])
entries.append(Entry(status.timestamp, status))
return entries
def getAll(self):
"""
Get status history for all device managers on all nodes
:returns: list of history entries
:rtype: [:class:`Entry`]
"""
rows = self.database.query("""
select status from history
where name is not null
order by timestamp desc""")
entries = []
for row in rows:
status = JSONSerializable.fromJSON(row["status"])
entries.append(Entry(status.timestamp, status))
return entries
def getLatest(self, node, name):
"""
Get latest status for device manager with specified name on specified node
:param node: node name
:type node: str
:param name: device manager name
:type name: str
:returns: history entry
:rtype: :class:`Entry`
"""
rows = self.database.query(
"""
select status from status
where node=? and name=?""",
(node, name))
if rows:
status = JSONSerializable.fromJSON(rows[0]["status"])
return Entry(status.timestamp, status)
return None
def remove(self, node=None, name=None):
"""
Remove status history for device managers with specified names on specified nodes.
node and name:
remove history for specific device on a specific node
node and no name
remove history for all devices on a specific node
no node and name
remove history for specific device on all nodes
no node and no name
remove history for all devices on all nodes
import os
import pickle
from functools import partial
from itertools import permutations, combinations
import networkx as nx
import numpy as np
from bitstring import BitArray
from collections import Counter
try:
from graph_measures.features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
except ModuleNotFoundError as e:
from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
CUR_PATH = os.path.realpath(__file__)
BASE_PATH = os.path.dirname(os.path.dirname(CUR_PATH))
VERBOSE = False
DEBUG = False
SAVE_COUNTED_MOTIFS = False
interesting_groups = [
sorted([0, 1, 8, 27])
]
class MotifsNodeCalculator(NodeFeatureCalculator):
def __init__(self, *args, level=3, **kwargs):
super(MotifsNodeCalculator, self).__init__(*args, **kwargs)
assert level in [3, 4], "Unsupported motif level %d" % (level,)
self._level = level
self._node_variations = {}
self._all_motifs = None
self._print_name += "_%d" % (self._level,)
self._gnx = self._gnx.copy()
self._load_variations()
self._counted_motifs = set() # Only used if SAVE_COUNTED_MOTIFS is set
self._double_counter = Counter()
def is_relevant(self):
return True
@classmethod
def print_name(cls, level=None):
print_name = super(MotifsNodeCalculator, cls).print_name()
if level is None:
return print_name
return "%s_%d" % (print_name, level)
# name = super(MotifsNodeCalculator, cls).print_name()
# name.split("_")[0]
def _load_variations_file(self):
fname = "%d_%sdirected.pkl" % (self._level, "" if self._gnx.is_directed() else "un")
fpath = os.path.join(BASE_PATH, "motif_variations", fname)
return pickle.load(open(fpath, "rb"))
def _load_variations(self):
self._node_variations = self._load_variations_file()
self._all_motifs = set(self._node_variations.values())
# here we pass over the edges of the sub-graph containing only the bunch nodes
# and calculate the expected index of each edge (with respect to whether the graph is directed or not)
# the index formulas follow from straightforward counting:
# combinations index: sum_0_to_n1-1((n - i) - 1) + n2 - n1 - 1
# permutations index: each set has (n - 1) items, so n1 determines the set and n2 the position inside it
def _get_group_number_opt1(self, nbunch):
subgnx = self._gnx.subgraph(nbunch)
nodes = {node: i for i, node in enumerate(subgnx)}
n = len(nodes)
if subgnx.is_directed():
def edge_index(n1, n2):
return n1 * (n - 1) + n2 - (1 * (n2 > n1))
else:
def edge_index(n1, n2):
n1, n2 = min(n1, n2), max(n1, n2)
return (n1 / 2) * (2 * n - 3 - n1) + n2 - 1
return sum(2 ** edge_index(nodes[edge[0]], nodes[edge[1]]) for edge in subgnx.edges())
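# Worked index example for a directed subgraph on n=3 nodes indexed 0..2:
# edge_index(0,1)=0, (0,2)=1, (1,0)=2, (1,2)=3, (2,0)=4, (2,1)=5, so a
# subgraph with edges 0->1 and 2->1 yields 2**0 + 2**5 = 33 as its group number.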
# iterating over all:
# * undirected graph: combinations [n*(n-1)/2 pairs - handshake lemma]
# * directed graph: permutations [n*(n-1) ordered pairs - handshake lemma with respect to order]
# checking whether each edge exists in the graph - and constructing a bitmask of the existing edges
def _get_group_number(self, nbunch):
func = permutations if self._gnx.is_directed() else combinations
if DEBUG:
pass
return BitArray(self._gnx.has_edge(n1, n2) for n1, n2 in func(nbunch, 2)).uint
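# e.g. for an undirected triple (a, b, c) with all three edges present, the
# bits for (a,b), (a,c), (b,c) are all set and the bitmask reads 0b111 = 7.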
# def _get_motif_sub_tree(self, root, length):
# implementing the "Kavosh" algorithm for subgroups of length 3
def _get_motif3_sub_tree(self, root):
visited_vertices = {root: 0}
visited_index = 1
# variation == (1, 1)
first_neighbors = set(nx.all_neighbors(self._gnx, root))
# neighbors, visited_neighbors = tee(first_neighbors)
for n1 in first_neighbors:
visited_vertices[n1] = visited_index
visited_index += 1
for n1 in first_neighbors:
last_neighbors = set(nx.all_neighbors(self._gnx, n1))
for n2 in last_neighbors:
if n2 in visited_vertices:
if visited_vertices[n1] < visited_vertices[n2]:
yield [root, n1, n2]
else:
visited_vertices[n2] = visited_index
visited_index += 1
yield [root, n1, n2]
# variation == (2, 0)
for n1, n2 in combinations(first_neighbors, 2):
if (visited_vertices[n1] < visited_vertices[n2]) and \
not (self._gnx.has_edge(n1, n2) or self._gnx.has_edge(n2, n1)):
yield [root, n1, n2]
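# The two yields above cover the motif-3 variations: the (1, 1) branch emits
# root -> neighbor -> second-neighbor chains, and the (2, 0) branch emits
# pairs of root neighbors with no edge between them, e.g. edges
# {root-a, root-b} alone yield [root, a, b].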
# implementing the "Kavosh" algorithm for subgroups of length 4
def _get_motif4_sub_tree(self, root):
visited_vertices = {root: 0}
# visited_index = 1
# variation == (1, 1, 1)
neighbors_first_deg = set(nx.all_neighbors(self._gnx, root))
# neighbors_first_deg, visited_neighbors, len_a = tee(neighbors_first_deg, 3)
neighbors_first_deg = visited_neighbors = list(neighbors_first_deg)
for n1 in visited_neighbors:
visited_vertices[n1] = 1
for n1, n2, n3 in combinations(neighbors_first_deg, 3):
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
for n1 in neighbors_first_deg:
if DEBUG:
pass
neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
# neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
for n in visited_neighbors:
if n not in visited_vertices:
if DEBUG:
if n == 1:
hi = 0.5
visited_vertices[n] = 2
for n2 in neighbors_sec_deg:
for n11 in neighbors_first_deg:
if visited_vertices[n2] == 2 and n1 != n11:
edge_exists = (self._gnx.has_edge(n2, n11) or self._gnx.has_edge(n11, n2))
if (not edge_exists) or (edge_exists and n1 < n11):
group = [root, n1, n11, n2]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
# for n1 in neighbors_first_deg:
# if DEBUG:
# if root is 41:
# print('n1', n1)
# neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
# # neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
# neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
for comb in combinations(neighbors_sec_deg, 2):
if DEBUG:
if root == 41:
hi = 1
if 2 == visited_vertices[comb[0]] and visited_vertices[comb[1]] == 2:
group = [root, n1, comb[0], comb[1]]
if DEBUG:
if root == 41:
print('A 41 group:', group)
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
for n1 in neighbors_first_deg:
if DEBUG:
pass
neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
# neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
for n2 in neighbors_sec_deg:
if visited_vertices[n2] == 1:
continue
for n3 in set(nx.all_neighbors(self._gnx, n2)):
if DEBUG:
if root == 0 and n1 == 27 and n2 == 8 and n3 == 1:
hi = 1.5
if n3 not in visited_vertices:
if DEBUG:
pass
visited_vertices[n3] = 3
if visited_vertices[n2] == 2:
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
else:
if visited_vertices[n3] == 1:
continue
if visited_vertices[n3] == 2 and not (self._gnx.has_edge(n1, n3) or self._gnx.has_edge(n3, n1)):
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
elif visited_vertices[n3] == 3 and visited_vertices[n2] == 2:
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
def _order_by_degree(self, gnx=None):
if gnx is None:
gnx = self._gnx
return sorted(gnx, key=lambda n: len(list(nx.all_neighbors(gnx, n))), reverse=True)
def _calculate_motif(self):
# consider first calculating the nth neighborhood of a node
# and then iterate only over the corresponding graph
motif_func = self._get_motif3_sub_tree if self._level == 3 else self._get_motif4_sub_tree
sorted_nodes = self._order_by_degree()
for node in sorted_nodes:
for group in motif_func(node):
group_num = self._get_group_number(group)
motif_num = self._node_variations[group_num]
yield group, group_num, motif_num
if VERBOSE:
self._logger.debug("Finished node: %s" % node)
self._gnx.remove_node(node)
def _update_nodes_group(self, group, motif_num):
for node in group:
self._features[node][motif_num] += 1
def _calculate(self, include=None):
m_gnx = self._gnx.copy()
motif_counter = {motif_number: 0 for motif_number in self._all_motifs}
self._features = {node: motif_counter.copy() for node in self._gnx}
for i, (group, group_num, motif_num) in enumerate(self._calculate_motif()):
if DEBUG:
if 21 in group and motif_num == 47:
print('A 21/47 group:', group, motif_num)
pass
if sorted(group) in interesting_groups:
print('An interesting group:', group, motif_num)
if SAVE_COUNTED_MOTIFS:
h = hash(frozenset(group))
# h = frozenset(group)
if h in self._counted_motifs:
print("\033[91m Group {} already counted \033[00m".format(group))
self._double_counter[frozenset(group)] += 1
else:
self._counted_motifs.add(h)
self._update_nodes_group(group, motif_num)
if (i + 1) % 1000 == 0 and VERBOSE:
self._logger.debug("Groups: %d" % i)
# print('Max num of duplicates:', max(self._double_counter.values()))
# print('Number of motifs counted twice:', len(self._double_counter))
self._gnx = m_gnx
def _get_feature(self, element):
all_motifs = self._all_motifs.difference(set([None]))
cur_feature = self._features[element]
return np.array([cur_feature[motif_num] for motif_num in sorted(all_motifs)])
# consider ignoring node's data
class MotifsEdgeCalculator(MotifsNodeCalculator):
def __init__(self, *args, include_nodes=False, **kwargs):
self._edge_variations = {}
self._should_include_nodes = include_nodes
super(MotifsEdgeCalculator, self).__init__(*args, **kwargs)
def is_relevant(self):
# if graph is not directed, there is no use of edge variations
return self._gnx.is_directed()
def _calculate_motif_dictionaries(self):
# calculating the node variations
super(MotifsEdgeCalculator, self)._load_variations_file()
if not self._gnx.is_directed():
# if graph is not directed, there is no use of edge variations
return
motif_edges = list(permutations(range(self._level), 2))
# level * (level - 1) is number of permutations of size 2
num_edges = self._level * (self._level - 1)
for group_num, motif_num in self._node_variations.items():
bin_repr = BitArray(length=num_edges, int=group_num)
self._edge_variations[group_num] = set([edge_type for bit, edge_type in zip(bin_repr, motif_edges) if bit])
# noinspection PyMethodOverriding
def _calculate(self, include=None):
for group, group_num, motif_num in self._calculate_motif():
if self._should_include_nodes:
self._update_nodes_group(group, motif_num)
for edge_type in self._edge_variations[group_num]:
edge = tuple(map(lambda idx: group[idx], edge_type))
if edge not in self._features:
self._features[edge] = {motif_number: 0 for motif_number in self._all_motifs}
self._features[edge][motif_num] += 1
def nth_nodes_motif(motif_level):
return partial(MotifsNodeCalculator, level=motif_level)
def nth_edges_motif(motif_level):
return partial(MotifsEdgeCalculator, level=motif_level)
feature_node_entry = {
"motif3": FeatureMeta(nth_nodes_motif(3), {"m3"}),
"motif4": FeatureMeta(nth_nodes_motif(4), {"m4"}),
}
feature_edge_entry = {
"motif3_edge": FeatureMeta(nth_edges_motif(3), {"me3"}),
"motif4_edge": FeatureMeta(nth_edges_motif(4), {"me4"}),
}
if __name__ == "__main__":
pass
@property
def asset_parameters_expiration_date(self) -> str:
"""Relative expiration date."""
return self.__asset_parameters_expiration_date
@asset_parameters_expiration_date.setter
def asset_parameters_expiration_date(self, value: str):
self._property_changed('asset_parameters_expiration_date')
self.__asset_parameters_expiration_date = value
@property
def expiration(self) -> str:
"""The expiration date of the associated contract and the last date it trades."""
return self.__expiration
@expiration.setter
def expiration(self, value: str):
self._property_changed('expiration')
self.__expiration = value
@property
def country_name(self) -> str:
"""Country name for which FCI is calculated."""
return self.__country_name
@country_name.setter
def country_name(self, value: str):
self._property_changed('country_name')
self.__country_name = value
@property
def starting_date(self) -> str:
"""Start date of the period the valuation refers to."""
return self.__starting_date
@starting_date.setter
def starting_date(self, value: str):
self._property_changed('starting_date')
self.__starting_date = value
@property
def onboarded(self) -> bool:
"""Whether or not social domain has been onboarded."""
return self.__onboarded
@onboarded.setter
def onboarded(self, value: bool):
self._property_changed('onboarded')
self.__onboarded = value
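# Pattern note for the accessors in this class: each setter first records the
# change, then stores the value, e.g. (hypothetical instance `row`)
# row.onboarded = True  # calls _property_changed('onboarded'), then assigns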
@property
def liquidity_score(self) -> float:
"""Liquidity conditions in the aggregate market, calculated as the average of touch
liquidity score, touch spread score, and depth spread score."""
return self.__liquidity_score
@liquidity_score.setter
def liquidity_score(self, value: float):
self._property_changed('liquidity_score')
self.__liquidity_score = value
@property
def spread_leg2(self) -> float:
"""Spread of leg."""
return self.__spread_leg2
@spread_leg2.setter
def spread_leg2(self, value: float):
self._property_changed('spread_leg2')
self.__spread_leg2 = value
@property
def spread_leg1(self) -> float:
"""Spread of leg."""
return self.__spread_leg1
@spread_leg1.setter
def spread_leg1(self, value: float):
self._property_changed('spread_leg1')
self.__spread_leg1 = value
@property
def long_rates_contribution(self) -> float:
"""Contribution of long rate component to FCI."""
return self.__long_rates_contribution
@long_rates_contribution.setter
def long_rates_contribution(self, value: float):
self._property_changed('long_rates_contribution')
self.__long_rates_contribution = value
@property
def importance(self) -> float:
"""Importance."""
return self.__importance
@importance.setter
def importance(self, value: float):
self._property_changed('importance')
self.__importance = value
@property
def source_date_span(self) -> float:
"""Date span for event in days."""
return self.__source_date_span
@source_date_span.setter
def source_date_span(self, value: float):
self._property_changed('source_date_span')
self.__source_date_span = value
@property
def ann_yield6_month(self) -> float:
"""Calculates the total return for 6 months, representing past performance."""
return self.__ann_yield6_month
@ann_yield6_month.setter
def ann_yield6_month(self, value: float):
self._property_changed('ann_yield6_month')
self.__ann_yield6_month = value
@property
def underlying_data_set_id(self) -> str:
"""Dataset on which this (virtual) dataset is based."""
return self.__underlying_data_set_id
@underlying_data_set_id.setter
def underlying_data_set_id(self, value: str):
self._property_changed('underlying_data_set_id')
self.__underlying_data_set_id = value
@property
def close_unadjusted(self) -> float:
"""Unadjusted Close level of an asset based on official exchange fixing or
calculation agent marked level."""
return self.__close_unadjusted
@close_unadjusted.setter
def close_unadjusted(self, value: float):
self._property_changed('close_unadjusted')
self.__close_unadjusted = value
@property
def value_unit(self) -> str:
"""Value unit."""
return self.__value_unit
@value_unit.setter
def value_unit(self, value: str):
self._property_changed('value_unit')
self.__value_unit = value
@property
def quantity_unit(self) -> str:
"""Unit of measure for trade quantity."""
return self.__quantity_unit
@quantity_unit.setter
def quantity_unit(self, value: str):
self._property_changed('quantity_unit')
self.__quantity_unit = value
@property
def adjusted_low_price(self) -> float:
"""Adjusted low level of an asset based on official exchange fixing or calculation
agent marked level."""
return self.__adjusted_low_price
@adjusted_low_price.setter
def adjusted_low_price(self, value: float):
self._property_changed('adjusted_low_price')
self.__adjusted_low_price = value
@property
def net_exposure_classification(self) -> str:
"""Classification for net exposure of fund."""
return self.__net_exposure_classification
@net_exposure_classification.setter
def net_exposure_classification(self, value: str):
self._property_changed('net_exposure_classification')
self.__net_exposure_classification = value
@property
def settlement_method(self) -> str:
"""Settlement method of the swap."""
return self.__settlement_method
@settlement_method.setter
def settlement_method(self, value: str):
self._property_changed('settlement_method')
self.__settlement_method = value
@property
def long_conviction_large(self) -> float:
"""The count of long ideas with large conviction."""
return self.__long_conviction_large
@long_conviction_large.setter
def long_conviction_large(self, value: float):
self._property_changed('long_conviction_large')
self.__long_conviction_large = value
@property
def alpha(self) -> float:
"""Alpha."""
return self.__alpha
@alpha.setter
def alpha(self, value: float):
self._property_changed('alpha')
self.__alpha = value
@property
def company(self) -> str:
"""Activity user company."""
return self.__company
@company.setter
def company(self, value: str):
self._property_changed('company')
self.__company = value
@property
def conviction_list(self) -> bool:
"""Conviction List, which is true if the security is on the Conviction Buy List or
false otherwise. Securities with a convictionList value equal to true
are by definition a subset of the securities with a rating equal to
Buy."""
return self.__conviction_list
@conviction_list.setter
def conviction_list(self, value: bool):
self._property_changed('conviction_list')
self.__conviction_list = value
@property
def settlement_frequency(self) -> str:
"""Settlement Frequency provided by Participant (e.g., Monthly, Daily)."""
return self.__settlement_frequency
@settlement_frequency.setter
def settlement_frequency(self, value: str):
self._property_changed('settlement_frequency')
self.__settlement_frequency = value
@property
def dist_avg7_day(self) -> float:
"""Goldman custom calculated value, only used for GS onshore Money Market Funds,
assumes sum of the past 7 days divided by 7 and expressed as a
percent."""
return self.__dist_avg7_day
@dist_avg7_day.setter
def dist_avg7_day(self, value: float):
self._property_changed('dist_avg7_day')
self.__dist_avg7_day = value
@property
def remove_tape_c(self) -> float:
"""Goldman's rate for liquidity removing trades on tape C."""
return self.__remove_tape_c
@remove_tape_c.setter
def remove_tape_c(self, value: float):
self._property_changed('remove_tape_c')
self.__remove_tape_c = value
@property
def remove_tape_b(self) -> float:
"""Goldman's rate for liquidity removing trades on tape B."""
return self.__remove_tape_b
@remove_tape_b.setter
def remove_tape_b(self, value: float):
self._property_changed('remove_tape_b')
self.__remove_tape_b = value
@property
def in_risk_model(self) -> bool:
"""Whether or not the asset is in the risk model universe."""
return self.__in_risk_model
@in_risk_model.setter
def in_risk_model(self, value: bool):
self._property_changed('in_risk_model')
self.__in_risk_model = value
@property
def daily_net_shareholder_flows_percent(self) -> float:
"""Percent of assets paid daily."""
return self.__daily_net_shareholder_flows_percent
@daily_net_shareholder_flows_percent.setter
def daily_net_shareholder_flows_percent(self, value: float):
self._property_changed('daily_net_shareholder_flows_percent')
self.__daily_net_shareholder_flows_percent = value
@property
def type_of_return(self) -> str:
"""The type of return for the commodity index. Only applicable for commodity
indices."""
return self.__type_of_return
@type_of_return.setter
def type_of_return(self, value: str):
self._property_changed('type_of_return')
self.__type_of_return = value
@property
def servicing_cost_long_pnl(self) -> float:
"""Servicing Cost Long Profit and Loss."""
return self.__servicing_cost_long_pnl
@servicing_cost_long_pnl.setter
def servicing_cost_long_pnl(self, value: float):
self._property_changed('servicing_cost_long_pnl')
self.__servicing_cost_long_pnl = value
@property
def excess_margin_percentage(self) -> float:
"""Available credit percentage."""
return self.__excess_margin_percentage
@excess_margin_percentage.setter
def excess_margin_percentage(self, value: float):
self._property_changed('excess_margin_percentage')
self.__excess_margin_percentage = value
@property
def remove_tape_a(self) -> float:
"""Goldman's rate for liquidity removing trades on tape A."""
return self.__remove_tape_a
@remove_tape_a.setter
def remove_tape_a(self, value: float):
self._property_changed('remove_tape_a')
self.__remove_tape_a = value
@property
def meeting_number(self) -> float:
"""Central bank meeting number."""
return self.__meeting_number
@meeting_number.setter
def meeting_number(self, value: float):
self._property_changed('meeting_number')
self.__meeting_number = value
@property
def exchange_id(self) -> str:
"""Unique identifier for an exchange."""
return self.__exchange_id
@exchange_id.setter
def exchange_id(self, value: str):
self._property_changed('exchange_id')
self.__exchange_id = value
@property
def mid_gspread(self) -> float:
"""Mid G spread."""
return self.__mid_gspread
@mid_gspread.setter
def mid_gspread(self, value: float):
self._property_changed('mid_gspread')
self.__mid_gspread = value
@property
def tcm_cost_horizon20_day(self) -> float:
"""TCM cost with a 20 day time horizon."""
return self.__tcm_cost_horizon20_day
@tcm_cost_horizon20_day.setter
def tcm_cost_horizon20_day(self, value: float):
self._property_changed('tcm_cost_horizon20_day')
self.__tcm_cost_horizon20_day = value
@property
def long_level(self) -> float:
"""Level of the 5-day normalized flow for long selling/buying."""
return self.__long_level
@long_level.setter
def long_level(self, value: float):
self._property_changed('long_level')
self.__long_level = value
@property
def realm(self) -> str:
"""Realm."""
return self.__realm
@realm.setter
def realm(self, value: str):
self._property_changed('realm')
self.__realm = value
@property
def bid(self) -> float:
"""Latest Bid Price (price willing to buy)."""
return self.__bid
@bid.setter
def bid(self, value: float):
self._property_changed('bid')
self.__bid = value
@property
def is_aggressive(self) -> float:
"""Indicates if the fill was aggressive or passive."""
return self.__is_aggressive
@is_aggressive.setter
def is_aggressive(self, value: float):
self._property_changed('is_aggressive')
self.__is_aggressive = value
@property
def order_id(self) -> str:
"""The unique ID of the order."""
return self.__order_id
@order_id.setter
def order_id(self, value: str):
self._property_changed('order_id')
self.__order_id = value
@property
def repo_rate(self) -> float:
"""Repurchase Rate."""
return self.__repo_rate
@repo_rate.setter
def repo_rate(self, value: float):
self._property_changed('repo_rate')
self.__repo_rate = value
@property
def market_cap_usd(self) -> float:
"""Market capitalization of a given asset denominated in USD."""
return self.__market_cap_usd
@market_cap_usd.setter
def market_cap_usd(self, value: float):
self._property_changed('market_cap_usd')
self.__market_cap_usd = value
@property
def high_price(self) -> float:
"""High level of an asset based on official exchange fixing or calculation agent
marked level."""
return self.__high_price
@high_price.setter
def high_price(self, value: float):
self._property_changed('high_price')
self.__high_price = value
@property
def absolute_shares(self) -> float:
"""The number of shares without adjusting for side."""
return self.__absolute_shares
@absolute_shares.setter
def absolute_shares(self, value: float):
self._property_changed('absolute_shares')
self.__absolute_shares = value
@property
def action(self) -> str:
"""The activity action. For example: Viewed"""
return self.__action
@action.setter
def action(self, value: str):
self._property_changed('action')
self.__action = value
@property
def model(self) -> str:
"""Model."""
return self.__model
@model.setter
def model(self, value: str):
self._property_changed('model')
self.__model = value
@property
def equity_risk_premia(self) -> float:
"""Equity risk premium: difference between cost of equity and 10y treasury yield."""
return self.__equity_risk_premia
@equity_risk_premia.setter
def equity_risk_premia(self, value: float):
self._property_changed('equity_risk_premia')
self.__equity_risk_premia = value
@property
def id(self) -> str:
"""Marquee unique identifier"""
return self.__id
@id.setter
def id(self, value: str):
self._property_changed('id')
self.__id = value
@property
def arrival_haircut_vwap_normalized(self) -> float:
"""Performance against Benchmark in pip."""
return self.__arrival_haircut_vwap_normalized
@arrival_haircut_vwap_normalized.setter
def arrival_haircut_vwap_normalized(self, value: float):
self._property_changed('arrival_haircut_vwap_normalized')
self.__arrival_haircut_vwap_normalized = value
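# Illustrative sketch (standalone, hypothetical Example class): the repeated
# property/setter pattern above stores each field in a name-mangled private
# attribute and routes every assignment through _property_changed(), which
# lets the owning object track which fields were modified.
class Example:
    def __init__(self):
        self._changed = set()
        self.__bid = None

    def _property_changed(self, name):
        self._changed.add(name)

    @property
    def bid(self) -> float:
        """Latest Bid Price (price willing to buy)."""
        return self.__bid

    @bid.setter
    def bid(self, value: float):
        self._property_changed('bid')
        self.__bid = value

example = Example()
example.bid = 101.25
assert example.bid == 101.25 and 'bid' in example._changed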
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class TypesGroup(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'group_id': 'str',
'group_name': 'str',
'last_modified': 'datetime',
'ldap_group': 'bool',
'oauth_group': 'bool',
'oidc_group': 'bool',
'owner': 'str',
'permissions': 'list[ApiPermission]',
'role': 'str',
'saml_group': 'bool',
'user': 'list[SharedUser]'
}
attribute_map = {
'id': '_id',
'group_id': 'groupId',
'group_name': 'groupName',
'last_modified': 'lastModified',
'ldap_group': 'ldapGroup',
'oauth_group': 'oauthGroup',
'oidc_group': 'oidcGroup',
'owner': 'owner',
'permissions': 'permissions',
'role': 'role',
'saml_group': 'samlGroup',
'user': 'user'
}
def __init__(self, id=None, group_id=None, group_name=None, last_modified=None, ldap_group=None, oauth_group=None, oidc_group=None, owner=None, permissions=None, role=None, saml_group=None, user=None, local_vars_configuration=None): # noqa: E501
"""TypesGroup - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._group_id = None
self._group_name = None
self._last_modified = None
self._ldap_group = None
self._oauth_group = None
self._oidc_group = None
self._owner = None
self._permissions = None
self._role = None
self._saml_group = None
self._user = None
self.discriminator = None
if id is not None:
self.id = id
if group_id is not None:
self.group_id = group_id
if group_name is not None:
self.group_name = group_name
if last_modified is not None:
self.last_modified = last_modified
if ldap_group is not None:
self.ldap_group = ldap_group
if oauth_group is not None:
self.oauth_group = oauth_group
if oidc_group is not None:
self.oidc_group = oidc_group
if owner is not None:
self.owner = owner
if permissions is not None:
self.permissions = permissions
if role is not None:
self.role = role
if saml_group is not None:
self.saml_group = saml_group
if user is not None:
self.user = user
@property
def id(self):
"""Gets the id of this TypesGroup. # noqa: E501
Group name. # noqa: E501
:return: The id of this TypesGroup. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TypesGroup.
Group name. # noqa: E501
:param id: The id of this TypesGroup. # noqa: E501
:type id: str
"""
self._id = id
@property
def group_id(self):
"""Gets the group_id of this TypesGroup. # noqa: E501
Group identifier in the Azure SAML identification process. # noqa: E501
:return: The group_id of this TypesGroup. # noqa: E501
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this TypesGroup.
Group identifier in the Azure SAML identification process. # noqa: E501
:param group_id: The group_id of this TypesGroup. # noqa: E501
:type group_id: str
"""
self._group_id = group_id
@property
def group_name(self):
"""Gets the group_name of this TypesGroup. # noqa: E501
Group name. # noqa: E501
:return: The group_name of this TypesGroup. # noqa: E501
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this TypesGroup.
Group name. # noqa: E501
:param group_name: The group_name of this TypesGroup. # noqa: E501
:type group_name: str
"""
self._group_name = group_name
@property
def last_modified(self):
"""Gets the last_modified of this TypesGroup. # noqa: E501
Datetime when the group was created or last modified. # noqa: E501
:return: The last_modified of this TypesGroup. # noqa: E501
:rtype: datetime
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""Sets the last_modified of this TypesGroup.
Datetime when the group was created or last modified. # noqa: E501
:param last_modified: The last_modified of this TypesGroup. # noqa: E501
:type last_modified: datetime
"""
self._last_modified = last_modified
@property
def ldap_group(self):
"""Gets the ldap_group of this TypesGroup. # noqa: E501
Indicates if the group is an LDAP group (true) or not (false). # noqa: E501
:return: The ldap_group of this TypesGroup. # noqa: E501
:rtype: bool
"""
return self._ldap_group
@ldap_group.setter
def ldap_group(self, ldap_group):
"""Sets the ldap_group of this TypesGroup.
Indicates if the group is an LDAP group (true) or not (false). # noqa: E501
:param ldap_group: The ldap_group of this TypesGroup. # noqa: E501
:type ldap_group: bool
"""
self._ldap_group = ldap_group
@property
def oauth_group(self):
"""Gets the oauth_group of this TypesGroup. # noqa: E501
Indicates if the group is an OAuth group (true) or not (false). # noqa: E501
:return: The oauth_group of this TypesGroup. # noqa: E501
:rtype: bool
"""
return self._oauth_group
@oauth_group.setter
def oauth_group(self, oauth_group):
"""Sets the oauth_group of this TypesGroup.
Indicates if the group is an OAuth group (true) or not (false). # noqa: E501
:param oauth_group: The oauth_group of this TypesGroup. # noqa: E501
:type oauth_group: bool
"""
self._oauth_group = oauth_group
@property
def oidc_group(self):
"""Gets the oidc_group of this TypesGroup. # noqa: E501
Indicates if the group is an OpenID Connect group (true) or not (false). # noqa: E501
:return: The oidc_group of this TypesGroup. # noqa: E501
:rtype: bool
"""
return self._oidc_group
@oidc_group.setter
def oidc_group(self, oidc_group):
"""Sets the oidc_group of this TypesGroup.
Indicates if the group is an OpenID Connect group (true) or not (false). # noqa: E501
:param oidc_group: The oidc_group of this TypesGroup. # noqa: E501
:type oidc_group: bool
"""
self._oidc_group = oidc_group
@property
def owner(self):
"""Gets the owner of this TypesGroup. # noqa: E501
User who created or modified the group. # noqa: E501
:return: The owner of this TypesGroup. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this TypesGroup.
User who created or modified the group. # noqa: E501
:param owner: The owner of this TypesGroup. # noqa: E501
:type owner: str
"""
self._owner = owner
@property
def permissions(self):
"""Gets the permissions of this TypesGroup. # noqa: E501
Permissions is a list of permissions # noqa: E501
:return: The permissions of this TypesGroup. # noqa: E501
:rtype: list[ApiPermission]
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""Sets the permissions of this TypesGroup.
Permissions is a list of permissions # noqa: E501
:param permissions: The permissions of this TypesGroup. # noqa: E501
:type permissions: list[ApiPermission]
"""
self._permissions = permissions
@property
def role(self):
"""Gets the role of this TypesGroup. # noqa: E501
Role of the group. # noqa: E501
:return: The role of this TypesGroup. # noqa: E501
:rtype: str
"""
return self._role
@role.setter
def role(self, role):
"""Sets the role of this TypesGroup.
Role of the group. # noqa: E501
:param role: The role of this TypesGroup. # noqa: E501
:type role: str
"""
self._role = role
@property
def saml_group(self):
"""Gets the saml_group of this TypesGroup. # noqa: E501
Indicates if the group is a SAML group (true) or not (false). # noqa: E501
:return: The saml_group of this TypesGroup. # noqa: E501
:rtype: bool
"""
return self._saml_group
@saml_group.setter
def saml_group(self, saml_group):
"""Sets the saml_group of this TypesGroup.
Indicates if the group is a SAML group (true) or not (false). # noqa: E501
:param saml_group: The saml_group of this TypesGroup. # noqa: E501
:type saml_group: bool
"""
self._saml_group = saml_group
@property
def user(self):
"""Gets the user of this TypesGroup. # noqa: E501
Users in the group. # noqa: E501
:return: The user of this TypesGroup. # noqa: E501
:rtype: list[SharedUser]
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this TypesGroup.
Users in the group. # noqa: E501
:param user: The user of this TypesGroup. # noqa: E501
:type user: list[SharedUser]
"""
self._user = user
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value,
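# Illustrative sketch (standalone, hypothetical names): attribute_map in the
# generated model above maps python attribute names to their JSON keys, which
# is how to_dict(serialize=True) produces wire-format field names.
attribute_map_demo = {'group_name': 'groupName', 'ldap_group': 'ldapGroup'}
model_fields = {'group_name': 'devops', 'ldap_group': True}
serialized = {attribute_map_demo.get(key, key): value
              for key, value in model_fields.items()}
assert serialized == {'groupName': 'devops', 'ldapGroup': True}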
'61342427':{'en': 'Geelong'},
'61342428':{'en': 'Kennedys Creek'},
'61342429':{'en': 'Kennedys Creek'},
'6134243':{'en': 'Geelong'},
'61342440':{'en': 'Queenscliff'},
'61342441':{'en': 'Geelong'},
'61342442':{'en': 'Geelong'},
'61342443':{'en': '<NAME>'},
'61342444':{'en': '<NAME>'},
'61342445':{'en': '<NAME>'},
'61342446':{'en': 'Torquay'},
'61342447':{'en': 'Colac'},
'61342448':{'en': 'Geelong'},
'61342449':{'en': 'Geelong'},
'6134245':{'en': 'Geelong'},
'61342460':{'en': 'Geelong'},
'61342461':{'en': 'Geelong'},
'6134300':{'en': 'Horsham'},
'61343010':{'en': 'Stawell'},
'61343011':{'en': 'Daylesford'},
'61343012':{'en': 'Mount Wallace'},
'61343013':{'en': 'Mount Wallace'},
'61343014':{'en': 'Bacchus Marsh'},
'61343015':{'en': 'Ballarat'},
'61343016':{'en': 'Ballarat'},
'613430173':{'en': 'Ballarat'},
'61343018':{'en': 'Yaapeet'},
'61343019':{'en': 'Yaapeet'},
'6134302':{'en': 'Bacchus Marsh'},
'6134303':{'en': 'Ballarat'},
'6134304':{'en': 'Ballarat'},
'61343050':{'en': 'Banyena'},
'61343051':{'en': 'Banyena'},
'61343052':{'en': 'Beulah'},
'61343053':{'en': 'Beulah'},
'61343054':{'en': 'Clear Lake'},
'61343055':{'en': 'Clear Lake'},
'61343056':{'en': 'Crymelon'},
'61343057':{'en': 'Crymelon'},
'61343058':{'en': '<NAME>'},
'61343059':{'en': '<NAME>'},
'61343060':{'en': 'Goroke'},
'61343061':{'en': 'Goroke'},
'61343062':{'en': 'Jeparit'},
'61343063':{'en': 'Jeparit'},
'61343064':{'en': 'Minimay'},
'61343065':{'en': 'Minimay'},
'61343066':{'en': 'Minyip'},
'61343067':{'en': 'Minyip'},
'61343068':{'en': 'Rainbow'},
'61343069':{'en': 'Rainbow'},
'61343070':{'en': 'Broughton'},
'61343071':{'en': 'Broughton'},
'61343072':{'en': 'Lorquon'},
'61343073':{'en': 'Lorquon'},
'61343074':{'en': 'Serviceton'},
'61343075':{'en': 'Serviceton'},
'61343076':{'en': 'Telopea Downs'},
'61343077':{'en': 'Telopea Downs'},
'61343078':{'en': 'Marnoo'},
'61343079':{'en': 'Marnoo'},
'6134308':{'en': 'Ballarat'},
'61343090':{'en': 'Ballarat'},
'61343091':{'en': 'Bacchus Marsh'},
'61343092':{'en': 'Bacchus Marsh'},
'61343093':{'en': 'Ararat'},
'61343094':{'en': 'Ballarat'},
'61343095':{'en': 'Stawell'},
'61343096':{'en': 'Horsham'},
'61343097':{'en': 'Bacchus Marsh'},
'61343098':{'en': 'Stawell'},
'61343099':{'en': 'Ararat'},
'61343100':{'en': 'Bacchus Marsh'},
'61343101':{'en': 'Horsham'},
'61343102':{'en': 'Ballarat'},
'61343103':{'en': 'Ballan'},
'61343104':{'en': 'Beaufort'},
'61343105':{'en': 'Horsham'},
'61343106':{'en': 'Ballarat'},
'61343107':{'en': 'Ballarat'},
'61343108':{'en': 'Bacchus Marsh'},
'61343109':{'en': 'Creswick'},
'61343110':{'en': 'Creswick'},
'61343111':{'en': 'Ballarat'},
'61343112':{'en': 'Halls Gap'},
'61343113':{'en': 'Buninyong'},
'61343114':{'en': 'Ballarat'},
'613431150':{'en': 'Ararat'},
'613431151':{'en': '<NAME>'},
'613431152':{'en': 'Ballan'},
'613431153':{'en': 'Ballarat'},
'613431154':{'en': 'Balliang'},
'613431155':{'en': 'Bangerang'},
'613431156':{'en': 'Banyena'},
'613431157':{'en': 'Beaufort'},
'613431158':{'en': 'Beulah'},
'613431159':{'en': 'Broughton'},
'613431160':{'en': 'Buangor'},
'613431161':{'en': 'Buninyong'},
'613431162':{'en': 'Clear Lake'},
'613431163':{'en': 'Creswick'},
'613431164':{'en': 'Crymelon'},
'613431165':{'en': '<NAME>'},
'613431166':{'en': 'Daylesford'},
'613431167':{'en': 'Dimboola'},
'613431168':{'en': 'Elmhurst'},
'613431169':{'en': '<NAME>'},
'613431170':{'en': 'Glenisla'},
'613431171':{'en': 'Glenorchy'},
'613431172':{'en': 'Goroke'},
'613431173':{'en': '<NAME>'},
'613431174':{'en': 'Horsham'},
'613431175':{'en': 'Jeparit'},
'613431176':{'en': 'Kalkee'},
'613431177':{'en': 'Kaniva'},
'613431178':{'en': 'Laharum'},
'613431179':{'en': 'Lake Bolac'},
'613431180':{'en': 'Landsborough'},
'613431181':{'en': 'Learmonth'},
'613431182':{'en': 'Linton'},
'613431183':{'en': 'Lorquon'},
'613431184':{'en': 'Marnoo'},
'613431185':{'en': 'Maroona'},
'613431186':{'en': 'Minimay'},
'613431187':{'en': 'Minyip'},
'613431188':{'en': 'Mount Wallace'},
'613431189':{'en': 'Moyston'},
'613431190':{'en': 'Murtoa'},
'613431191':{'en': 'Natimuk'},
'613431192':{'en': 'Navarre'},
'613431193':{'en': 'Nhill'},
'613431194':{'en': 'Polkemmet'},
'613431195':{'en': 'Rainbow'},
'613431196':{'en': 'Rokewood'},
'613431197':{'en': 'Scarsdale'},
'613431198':{'en': 'Serviceton'},
'613431199':{'en': 'Skipton'},
'613431200':{'en': 'Stawell'},
'613431201':{'en': 'Stoneleigh'},
'613431202':{'en': 'Streatham'},
'613431203':{'en': '<NAME>'},
'613431204':{'en': 'Warracknabeal'},
'613431205':{'en': 'Wilkur'},
'613431206':{'en': 'Willaura'},
'613431207':{'en': 'Yaapeet'},
'613431208':{'en': 'Ararat'},
'613431209':{'en': '<NAME>'},
'61343121':{'en': 'Rokewood'},
'61343122':{'en': 'Bangerang'},
'61343123':{'en': 'Banyena'},
'61343124':{'en': 'Beaufort'},
'61343125':{'en': 'Beulah'},
'61343126':{'en': 'Broughton'},
'61343127':{'en': 'Buangor'},
'61343128':{'en': 'Buninyong'},
'61343129':{'en': 'Clear Lake'},
'6134313':{'en': 'Ballarat'},
'61343130':{'en': 'Creswick'},
'61343131':{'en': 'Crymelon'},
'61343140':{'en': 'Dimboola'},
'61343141':{'en': 'Elmhurst'},
'61343142':{'en': '<NAME>'},
'61343143':{'en': 'Glenorchy'},
'61343144':{'en': 'Goroke'},
'61343145':{'en': 'Jeparit'},
'61343146':{'en': 'Kalkee'},
'61343147':{'en': 'Laharum'},
'61343148':{'en': 'Lake Bolac'},
'61343149':{'en': 'Landsborough'},
'61343150':{'en': 'Learmonth'},
'61343151':{'en': 'Linton'},
'61343152':{'en': 'Lorquon'},
'61343153':{'en': 'Marnoo'},
'61343154':{'en': 'Minyip'},
'61343155':{'en': 'Mount Wallace'},
'61343156':{'en': 'Moyston'},
'61343157':{'en': 'Murtoa'},
'61343158':{'en': 'Natimuk'},
'61343159':{'en': 'Navarre'},
'61343160':{'en': 'Skipton'},
'61343161':{'en': 'Stawell'},
'61343162':{'en': 'Stoneleigh'},
'61343163':{'en': 'Streatham'},
'61343164':{'en': '<NAME>'},
'61343165':{'en': 'Wilkur'},
'61343166':{'en': 'Willaura'},
'61343167':{'en': 'Yaapeet'},
'61343168':{'en': 'Kaniva'},
'613431690':{'en': 'Ballan'},
'613431691':{'en': 'Ballarat'},
'613431692':{'en': 'Balliang'},
'613431693':{'en': 'Bangerang'},
'613431694':{'en': 'Banyena'},
'613431695':{'en': 'Beaufort'},
'613431696':{'en': 'Beulah'},
'613431697':{'en': 'Broughton'},
'613431698':{'en': 'Buangor'},
'613431699':{'en': 'Buninyong'},
'613431700':{'en': 'Clear Lake'},
'613431701':{'en': 'Creswick'},
'613431702':{'en': 'Crymelon'},
'613431703':{'en': '<NAME>'},
'613431704':{'en': 'Daylesford'},
'613431705':{'en': 'Dimboola'},
'613431706':{'en': 'Elmhurst'},
'613431707':{'en': '<NAME>'},
'613431708':{'en': 'Glenisla'},
'613431709':{'en': 'Glenorchy'},
'613431710':{'en': 'Goroke'},
'613431711':{'en': '<NAME>'},
'613431712':{'en': 'Horsham'},
'613431713':{'en': 'Jeparit'},
'613431714':{'en': 'Kalkee'},
'613431715':{'en': 'Kaniva'},
'613431716':{'en': 'Laharum'},
'613431717':{'en': 'Lake Bolac'},
'613431718':{'en': 'Landsborough'},
'613431719':{'en': 'Learmonth'},
'613431720':{'en': 'Linton'},
'613431721':{'en': 'Lorquon'},
'613431722':{'en': 'Marnoo'},
'613431723':{'en': 'Maroona'},
'613431724':{'en': 'Minimay'},
'613431725':{'en': 'Minyip'},
'613431726':{'en': 'Mount Wallace'},
'613431727':{'en': 'Moyston'},
'613431728':{'en': 'Murtoa'},
'613431729':{'en': 'Natimuk'},
'613431730':{'en': 'Navarre'},
'613431731':{'en': 'Nhill'},
'613431732':{'en': 'Polkemmet'},
'613431733':{'en': 'Rainbow'},
'613431734':{'en': 'Rokewood'},
'613431735':{'en': 'Scarsdale'},
'613431736':{'en': 'Serviceton'},
'613431737':{'en': 'Skipton'},
'613431738':{'en': 'Stawell'},
'613431739':{'en': 'Stoneleigh'},
'613431740':{'en': 'Streatham'},
'613431741':{'en': '<NAME>'},
'613431742':{'en': 'Warracknabeal'},
'613431743':{'en': 'Wilkur'},
'613431744':{'en': 'Willaura'},
'613431745':{'en': 'Yaapeet'},
'613431746':{'en': 'Ararat'},
'613431747':{'en': 'Bacchus Marsh'},
'613431748':{'en': 'Ballan'},
'613431749':{'en': 'Ballarat'},
'61343175':{'en': 'Scarsdale'},
'613431760':{'en': 'Balliang'},
'613431761':{'en': 'Bangerang'},
'613431762':{'en': 'Banyena'},
'613431763':{'en': 'Beaufort'},
'613431764':{'en': 'Beulah'},
'613431765':{'en': 'Broughton'},
'613431766':{'en': 'Buangor'},
'613431767':{'en': 'Buninyong'},
'613431768':{'en': 'Clear Lake'},
'613431769':{'en': 'Creswick'},
'613431770':{'en': 'Crymelon'},
'613431771':{'en': 'Dadswells Bridge'},
'613431772':{'en': 'Daylesford'},
'613431773':{'en': 'Dimboola'},
'613431774':{'en': 'Elmhurst'},
'613431775':{'en': '<NAME>'},
'613431776':{'en': 'Glenisla'},
'613431777':{'en': 'Glenorchy'},
'613431778':{'en': 'Goroke'},
'613431779':{'en': 'Halls Gap'},
'613431780':{'en': 'Horsham'},
'613431781':{'en': 'Jeparit'},
'613431782':{'en': 'Kalkee'},
'613431783':{'en': 'Kaniva'},
'613431784':{'en': 'Laharum'},
'613431785':{'en': 'Lake Bolac'},
'613431786':{'en': 'Landsborough'},
'613431787':{'en': 'Learmonth'},
'613431788':{'en': 'Linton'},
'613431789':{'en': 'Lorquon'},
'613431790':{'en': 'Marnoo'},
'613431791':{'en': 'Maroona'},
'613431792':{'en': 'Minimay'},
'613431793':{'en': 'Minyip'},
'613431794':{'en': 'Mount Wallace'},
'613431795':{'en': 'Moyston'},
'613431796':{'en': 'Murtoa'},
'613431797':{'en': 'Natimuk'},
'613431798':{'en': 'Navarre'},
'613431799':{'en': 'Nhill'},
'613431800':{'en': 'Polkemmet'},
'613431801':{'en': 'Rainbow'},
'613431802':{'en': 'Rokewood'},
'613431803':{'en': 'Scarsdale'},
'613431804':{'en': 'Serviceton'},
'613431805':{'en': 'Skipton'},
'613431806':{'en': 'Stawell'},
'613431807':{'en': 'Stoneleigh'},
'613431808':{'en': 'Streatham'},
'613431809':{'en': '<NAME>'},
'613431810':{'en': 'Warracknabeal'},
'613431811':{'en': 'Wilkur'},
'613431812':{'en': 'Willaura'},
'613431813':{'en': 'Yaapeet'},
'613431814':{'en': 'Ararat'},
'613431815':{'en': '<NAME>'},
'613431816':{'en': 'Ballan'},
'613431817':{'en': 'Ballarat'},
'613431818':{'en': 'Balliang'},
'613431819':{'en': 'Bangerang'},
'61343182':{'en': 'Murtoa'},
'61343183':{'en': 'Linton'},
'61343184':{'en': 'Daylesford'},
'61343185':{'en': 'Scarsdale'},
'61343186':{'en': 'Scarsdale'},
'61343187':{'en': 'Scarsdale'},
'613431880':{'en': 'Banyena'},
'613431881':{'en': 'Beaufort'},
'613431882':{'en': 'Ararat'},
'613431883':{'en': '<NAME>'},
'613431884':{'en': 'Ballan'},
'613431885':{'en': 'Ballarat'},
'613431886':{'en': 'Balliang'},
'613431887':{'en': 'Bangerang'},
'613431888':{'en': 'Banyena'},
'613431889':{'en': 'Beaufort'},
'613431890':{'en': 'Beulah'},
'613431891':{'en': 'Broughton'},
'613431892':{'en': 'Buangor'},
'613431893':{'en': 'Buninyong'},
'613431894':{'en': 'Clear Lake'},
'613431895':{'en': 'Creswick'},
'613431896':{'en': 'Crymelon'},
'613431897':{'en': '<NAME>'},
'613431898':{'en': 'Daylesford'},
'613431899':{'en': 'Dimboola'},
'613431900':{'en': 'Elmhurst'},
'613431901':{'en': '<NAME>'},
'613431902':{'en': 'Glenisla'},
'613431903':{'en': 'Glenorchy'},
'613431904':{'en': 'Goroke'},
'613431905':{'en': 'Halls Gap'},
'613431906':{'en': 'Horsham'},
'613431907':{'en': 'Jeparit'},
'613431908':{'en': 'Kalkee'},
'613431909':{'en': 'Kaniva'},
'613431910':{'en': 'Laharum'},
'613431911':{'en': 'Lake Bolac'},
'613431912':{'en': 'Landsborough'},
'613431913':{'en': 'Learmonth'},
'613431914':{'en': 'Linton'},
'613431915':{'en': 'Lorquon'},
'613431916':{'en': 'Marnoo'},
'613431917':{'en': 'Maroona'},
'613431918':{'en': 'Minimay'},
'613431919':{'en': 'Minyip'},
'613431920':{'en': 'Mount Wallace'},
'613431921':{'en': 'Moyston'},
'613431922':{'en': 'Murtoa'},
'613431923':{'en': 'Natimuk'},
'613431924':{'en': 'Navarre'},
'613431925':{'en': 'Nhill'},
'613431926':{'en': 'Polkemmet'},
'613431927':{'en': 'Rainbow'},
'613431928':{'en': 'Rokewood'},
'613431929':{'en': 'Scarsdale'},
'613431930':{'en': 'Serviceton'},
'613431931':{'en': 'Skipton'},
'613431932':{'en': 'Stawell'},
'613431933':{'en': 'Stoneleigh'},
'613431934':{'en': 'Streatham'},
'613431935':{'en': '<NAME>'},
'613431936':{'en': 'Warracknabeal'},
'613431937':{'en': 'Wilkur'},
'613431938':{'en': 'Willaura'},
'613431939':{'en': 'Yaapeet'},
'61343194':{'en': '<NAME>'},
'613431950':{'en': 'Beulah'},
'613431951':{'en': 'Broughton'},
'613431952':{'en': 'Buangor'},
'613431953':{'en': 'Buninyong'},
'613431954':{'en': '<NAME>'},
'613431955':{'en': 'Creswick'},
'613431956':{'en': 'Crymelon'},
'613431957':{'en': '<NAME>'},
'613431958':{'en': 'Daylesford'},
'613431959':{'en': 'Dimboola'},
'613431960':{'en': 'Elmhurst'},
'613431961':{'en': '<NAME>'},
'613431962':{'en': 'Glenisla'},
'613431963':{'en': 'Glenorchy'},
'613431964':{'en': 'Goroke'},
'613431965':{'en': 'Halls Gap'},
'613431966':{'en': 'Horsham'},
'613431967':{'en': 'Jeparit'},
'613431968':{'en': 'Kalkee'},
'613431969':{'en': 'Kaniva'},
'613431970':{'en': 'Laharum'},
'613431971':{'en': 'Lake Bolac'},
'613431972':{'en': 'Landsborough'},
'613431973':{'en': 'Learmonth'},
'613431974':{'en': 'Linton'},
'613431975':{'en': 'Lorquon'},
'613431976':{'en': 'Marnoo'},
'613431977':{'en': 'Maroona'},
'613431978':{'en': 'Minimay'},
'613431979':{'en': 'Minyip'},
'613431980':{'en': 'Mount Wallace'},
'613431981':{'en': 'Moyston'},
'613431982':{'en': 'Murtoa'},
'613431983':{'en': 'Natimuk'},
'613431984':{'en': 'Navarre'},
'613431985':{'en': 'Nhill'},
'613431986':{'en': 'Polkemmet'},
'613431987':{'en': 'Rainbow'},
'613431988':{'en': 'Rokewood'},
'613431989':{'en': 'Scarsdale'},
'613431990':{'en': 'Serviceton'},
'613431991':{'en': 'Skipton'},
'613431992':{'en': 'Stawell'},
'613431993':{'en': 'Stoneleigh'},
'613431994':{'en': 'Streatham'},
'613431995':{'en': '<NAME>'},
'613431996':{'en': 'Warracknabeal'},
'613431997':{'en': 'Wilkur'},
'613431998':{'en': 'Willaura'},
'613431999':{'en': 'Yaapeet'},
'613432000':{'en': 'Ararat'},
'613432001':{'en': '<NAME>'},
'613432002':{'en': 'Ballan'},
'613432003':{'en': 'Ballarat'},
'613432004':{'en': 'Balliang'},
'613432005':{'en': 'Bangerang'},
'613432006':{'en': 'Banyena'},
'613432007':{'en': 'Beaufort'},
'613432008':{'en': 'Beulah'},
'613432009':{'en': 'Broughton'},
'613432010':{'en': 'Buangor'},
'613432011':{'en': 'Buninyong'},
'613432012':{'en': 'Clear Lake'},
'613432013':{'en': 'Creswick'},
'613432014':{'en': 'Crymelon'},
'613432015':{'en': '<NAME>'},
'613432016':{'en': 'Daylesford'},
'613432017':{'en': 'Dimboola'},
'613432018':{'en': 'Elmhurst'},
'613432019':{'en': '<NAME>'},
'613432020':{'en': 'Glenisla'},
'613432021':{'en': 'Glenorchy'},
'613432022':{'en': 'Goroke'},
'613432023':{'en': '<NAME>'},
'613432024':{'en': 'Horsham'},
'613432025':{'en': 'Jeparit'},
'613432026':{'en': 'Kalkee'},
'613432027':{'en': 'Kaniva'},
'613432028':{'en': 'Laharum'},
'613432029':{'en': '<NAME>'},
'613432030':{'en': 'Landsborough'},
'613432031':{'en': 'Learmonth'},
'613432032':{'en': 'Linton'},
'613432033':{'en': 'Lorquon'},
'613432034':{'en': 'Marnoo'},
'613432035':{'en': 'Maroona'},
'613432036':{'en': 'Minimay'},
'613432037':{'en': 'Minyip'},
'613432038':{'en': 'Mount Wallace'},
'613432039':{'en': 'Moyston'},
'613432040':{'en': 'Murtoa'},
'613432041':{'en': 'Natimuk'},
'613432042':{'en': 'Navarre'},
'613432043':{'en': 'Nhill'},
'613432044':{'en': 'Polkemmet'},
'613432045':{'en': 'Rainbow'},
'613432046':{'en': 'Rokewood'},
'613432047':{'en': 'Scarsdale'},
'613432048':{'en': 'Serviceton'},
'613432049':{'en': 'Skipton'},
'613432050':{'en': 'Stawell'},
'613432051':{'en': 'Stoneleigh'},
'613432052':{'en': 'Streatham'},
'613432053':{'en': '<NAME>'},
'613432054':{'en': 'Warracknabeal'},
'613432055':{'en': 'Wilkur'},
'613432056':{'en': 'Willaura'},
'613432057':{'en': 'Yaapeet'},
'613432058':{'en': 'Ararat'},
'613432059':{'en': '<NAME>'},
'613432060':{'en': 'Ballan'},
'613432061':{'en': 'Ballarat'},
'613432062':{'en': 'Balliang'},
'613432063':{'en': 'Bangerang'},
'613432064':{'en': 'Banyena'},
'613432065':{'en': 'Beaufort'},
'613432066':{'en': 'Beulah'},
'613432067':{'en': 'Broughton'},
'613432068':{'en': 'Buangor'},
'613432069':{'en': 'Buninyong'},
'613432070':{'en': 'Clear Lake'},
'613432071':{'en': 'Creswick'},
'613432072':{'en': 'Crymelon'},
'613432073':{'en': 'Dadswells Bridge'},
'613432074':{'en': 'Daylesford'},
'613432075':{'en': 'Dimboola'},
'613432076':{'en': 'Elmhurst'},
'613432077':{'en': '<NAME>'},
'613432078':{'en': 'Glenisla'},
'613432079':{'en': 'Glenorchy'},
'613432080':{'en': 'Goroke'},
'613432081':{'en': '<NAME>'},
'613432082':{'en': 'Horsham'},
'613432083':{'en': 'Jeparit'},
'613432084':{'en': 'Kalkee'},
'613432085':{'en': 'Kaniva'},
'613432086':{'en': 'Laharum'},
'613432087':{'en': 'Lake Bolac'},
'613432088':{'en': 'Landsborough'},
'613432089':{'en': 'Learmonth'},
'613432090':{'en': 'Linton'},
'613432091':{'en': 'Lorquon'},
'613432092':{'en': 'Marnoo'},
'613432093':{'en': 'Maroona'},
'613432094':{'en': 'Minimay'},
'613432095':{'en': 'Minyip'},
'613432096':{'en': 'Mount Wallace'},
'613432097':{'en': 'Moyston'},
'613432098':{'en': 'Murtoa'},
'613432099':{'en': 'Natimuk'},
'613432100':{'en': 'Navarre'},
'613432101':{'en': 'Nhill'},
'613432102':{'en': 'Polkemmet'},
'613432103':{'en': 'Rainbow'},
'613432104':{'en': 'Rokewood'},
'613432105':{'en': 'Scarsdale'},
'613432106':{'en': 'Serviceton'},
'613432107':{'en': 'Skipton'},
'613432108':{'en': 'Stawell'},
'613432109':{'en': 'Stoneleigh'},
'61343211':{'en': 'Horsham'},
'613432120':{'en': 'Streatham'},
'613432121':{'en': '<NAME>'},
'613432122':{'en': 'Warracknabeal'},
'613432123':{'en': 'Wilkur'},
'613432124':{'en': 'Willaura'},
'613432125':{'en': 'Yaapeet'},
'613432126':{'en': 'Ararat'},
'613432127':{'en': '<NAME>'},
'613432128':{'en': 'Ballan'},
'613432129':{'en': 'Ballarat'},
'613432130':{'en': 'Balliang'},
'613432131':{'en': 'Bangerang'},
'613432132':{'en': 'Banyena'},
'613432133':{'en': 'Beaufort'},
'613432134':{'en': 'Beulah'},
'613432135':{'en': 'Broughton'},
'613432136':{'en': 'Buangor'},
'613432137':{'en': 'Buninyong'},
'613432138':{'en': 'Clear Lake'},
'613432139':{'en': 'Creswick'},
'613432140':{'en': 'Crymelon'},
'613432141':{'en': '<NAME>'},
'613432142':{'en': 'Daylesford'},
'613432143':{'en': 'Dimboola'},
'613432144':{'en': 'Elmhurst'},
'613432145':{'en': '<NAME>'},
'613432146':{'en': 'Glenisla'},
'613432147':{'en': 'Glenorchy'},
'613432148':{'en': 'Goroke'},
'613432149':{'en': 'Halls Gap'},
'613432150':{'en': 'Horsham'},
'613432151':{'en': 'Jeparit'},
'613432152':{'en': 'Kalkee'},
'613432153':{'en': 'Kaniva'},
'613432154':{'en': 'Laharum'},
'613432155':{'en': 'Lake Bolac'},
'613432156':{'en': 'Landsborough'},
'613432157':{'en': 'Learmonth'},
'613432158':{'en': 'Linton'},
'613432159':{'en': 'Lorquon'},
'613432160':{'en': 'Marnoo'},
'613432161':{'en': 'Maroona'},
'613432162':{'en': 'Minimay'},
'613432163':{'en': 'Minyip'},
'613432164':{'en': 'Mount Wallace'},
'613432165':{'en': 'Moyston'},
'613432166':{'en': 'Murtoa'},
'613432167':{'en': 'Natimuk'},
'613432168':{'en': 'Navarre'},
'613432169':{'en': 'Nhill'},
'61343217':{'en': 'Horsham'},
'61343218':{'en': 'Horsham'},
'61343219':{'en': 'Horsham'},
'61343220':{'en': 'Horsham'},
'613432210':{'en': 'Polkemmet'},
'613432211':{'en': 'Rainbow'},
'613432212':{'en': 'Rokewood'},
'613432213':{'en': 'Scarsdale'},
'613432214':{'en': 'Serviceton'},
'613432215':{'en': 'Skipton'},
'613432216':{'en': 'Stawell'},
'613432217':{'en': 'Stoneleigh'},
'613432218':{'en': 'Streatham'},
'613432219':{'en': 'Telopea Downs'},
'613432220':{'en': 'Warracknabeal'},
'613432221':{'en': 'Wilkur'},
'613432222':{'en': 'Willaura'},
'613432223':{'en': 'Yaapeet'},
'61343223':{'en': 'Ballan'},
'61343224':{'en': 'Ballan'},
'61343225':{'en': 'Ballan'},
'61343226':{'en': 'Balliang'},
'61343227':{'en': 'Balliang'},
'61343228':{'en': 'Balliang'},
'61343229':{'en': 'Dadswells Bridge'},
'61343240':{'en': 'Maroona'},
'61343259':{'en': 'Serviceton'},
'6134330':{'en': 'Ballarat'},
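# Illustrative sketch (not part of the data file): prefix tables like the one
# above are typically consulted by longest-prefix match, trimming digits off
# the dialled number until a key is found. GEOCODE_DATA is a hypothetical
# name standing in for the dictionary above.
def describe_number(digits, table, lang='en'):
    """Return the description for the longest matching prefix, or None."""
    for end in range(len(digits), 0, -1):
        entry = table.get(digits[:end])
        if entry is not None:
            return entry.get(lang)
    return None

# describe_number('61343222299', GEOCODE_DATA) -> 'Willaura' (prefix 613432222)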
tile. When the incoming HSP is
not in the same frame as the tile, it is corrected.
Parameters:
___________
hsp : Hsp object of the Hsp to add.
location : either upstream (0) or downstream (1). The location of the
Match relative to the existing tile.
distance : distance between hsp and this tile, used as fill value
"""
difference = distance
if difference == 0 or difference % 3 == 0:
# deletion or substitution respectively - include and translate nts if present
self.add_hsp(hsp, location, difference)
else:
# substitution with frameshift
self.correct_frame(hsp, location, distance)
print >> logfile, "Deletion or substitution in contig relative to hit"
print >> logfile, "Hsp number {} added {} of tile".format(hsp.num, location)
self.printer()
def correct_frame(self, hsp, location, distance):
"""Make a correction between hsps when they are not in the same frame.
Frames are corrected by subtracting from the difference until it is a
multiple of three before adding it to the tile. This trims nucleotides
from the sequence being added to the tile until they translate in the
same frame.
Parameters:
___________
hsp : Hsp object of the Hsp to add.
location : either upstream (0) or downstream (1). The location of the
Match relative to the existing tile.
distance : distance between hsp and this tile, used as fill value
"""
difference = distance
while difference % 3 != 0:
# remove nts until difference is divisible by 3
difference -= 1
# add hsp with the cropped nts and translate
self.add_hsp(hsp, location, difference)
print >> logfile, "Hsp number {} not in same frame as tile - correcting".format(hsp.num)
def extendReadingFrame(self, conservative=False, prokaryotic=False):
"""Expand corrected nucletode sequence to the first start and stop codons
found in teh same frame
Parameters:
___________
conservative : bool. Only find the first start codon, do not expand to the next
stop farther upstream.
prokaryotic : bool. Use alternate start codons found in prokaryotic genomes
"""
if not prokaryotic:
startCodons = ["ATG"]
else:
startCodons = ["ATG", "GTG", "TTG"]
stopCodons = ["TAG", "TAA", "TGA"]
#Expand upstream to the nearest stop codon (if not conservative) or the nearest
#start codon if conservative.
codons = startCodons if conservative else stopCodons
start = self.start
while start >= 0 and not self.contig.sequence[start:start+3] in codons:
if start<3:
break
start -= 3
#Make sure that start is actually a start or stop codon
if not self.contig.sequence[start:start+3] in codons:
start = self.start
print >> logfile, "Warning, contig {} has no first stop codon within frame".format(self.contig.name)
#Find closest stop codon
end = self.end-3
while end<=len(self.contig.sequence)-3 and not self.contig.sequence[end:end+3] in stopCodons:
end += 3
#Make sure that stop is actually a stop codon
if not self.contig.sequence[end:end+3] in stopCodons:
end = self.end-3
print >> logfile, "Warning, contig {} has no stop codon within frame".format(self.contig.name)
# Build the extended sequence before updating the coordinates; assigning
# self.start/self.end first would make both flanking slices empty.
self.nt_seq.sequence = "{}{}{}".format(self.contig.sequence[start:self.start],
self.nt_seq.sequence,
self.contig.sequence[self.end:end+3])
self.start = start
self.end = end+3
self.aa_seq = self.nt_seq.translate_sequence(strand=self.strand)
def determineGaps(self):
"""Replace Xs with a cartesian product of all nucleotides. The length
of Xs signifies the length of the gap. The sequence whos 3-mer count
most accuraltey resembles the codon usage table is returned.
e.g. AAAAXXXXCCGXXCCX
WARNING: NOT FULLY IMPLEMENTED
"""
#Store the best sequence
bestScore = 0
bestSequence = None
print "There are {} combinations to try for {}".format(4**self.nt_seq.count("X"), self.contig.name)
print self.nt_seq
#Create several iterators that return the position of each X
xPat = re.compile("X")
#Try every single combination of nts and see which most closely relates
#to the codon usage
for i, replacement in enumerate(product("ACGT", repeat=self.nt_seq.count("X"))):
xIter = iter(replacement)
replSeq = xPat.sub(lambda m:xIter.next(), self.nt_seq)
#Compare this with the codon usage table
kmers, numSeqs = count_kmers(">t\n{}".format(replSeq).split("\n"), IUPAC_N, 3, normalize=True)
score = 0
for kmer, newCount in kmers.iteritems():
originalUsage = self.codon_usage[kmer]
score += newCount - originalUsage
if score < bestScore:
# track the best score as well, otherwise every later sequence is compared
# against the initial value only
bestScore = score
bestSequence = replSeq
"""Below is another test to add a gap based on codon usage. It first sees if the start
is a multiple of three and the gap length is a multiple of three. If so, try all possible
codons that fit. If the start is not a multiple of 3, it finds the most probable codons
that start with the one or two nucleotides before it. Next it sees if the rest of the gap
until the end is a multiple of three. If not, it is clipped until it is. This sequence is then
tested with all of the best codons. If the end was not a multiple of three, find codons with
one or two nucleotides that follow it. Untested.
"""
if False:
for match in re.finditer("X+", self.nt_seq):
if match.start() % 3 == 0 and len(match.group(0)) % 3 == 0:
#Match starts in frame and the length can hold at least one codon
for codons in product(self.codon_usage.keys(), repeat=len(match.group(0))/3):
replSeq = "{}{}{}".format(self.nt_seq[:match.start()],
"".join(codons),
self.nt_seq[match.end():])
#Which is the best?
else:
start = match.start()
while start % 3 != 0:
start -= 1
startCodons = [codon for codon in self.codon_usage if codon.startswith(self.nt_seq[start:3].replace("X", ""))]
#Which is the best?
end = match.end()
while (end+1) % 3 != 0:
end -= 1
middle = end-(match.start()-start)+1
for codons in product(self.codon_usage.keys(), repeat=middle):
replSeq = "{}{}{}".format(self.nt_seq[:match.start()],
"".join(codons),
self.nt_seq[match.end():])
endCodons = [codon for codon in self.codon_usage if codon.endswith(self.nt_seq[end+1:3].replace("X", ""))]
self.nt_seq = bestSequence
def _getCorrectedAA(self, query_seq):
"""Get the aa sequence used as the query and correct sequences that have
repetitive or low-complexity regions filtered from BLASTX by removing Xs
and gaps.
This function creates a regular expression from the Match query sequence.
E.g., if the Match protein sequence were FGTPPXPYII, the regular expression
created would be FGT....YII. This is used to search all the translations of
the contig for a matching sequence.
Parameters:
___________
query_seq : the query sequence
Return:
________
aa : corrected aa sequence
"""
#remove gaps
aa = re.sub(r'-', r'', query_seq)
#first 3 aas - replace X with . for re matching
start = re.sub(r'[X|x]', r'.', aa[:3])
#change * to _ to match stop codons correctly
start = re.sub(r'\*', r'.', start)
#last 3 aas
end = re.sub(r'[X|x]', r'.', aa[-3:])
end = re.sub(r'\*', r'.', end)
#Sequences that match one of the sequences in the six-frame translation
matches = []
attempt = 0
while not matches and attempt < 2:
#regex for matching
anychar_pattern = "."*(len(aa)-6)
pattern = r"{}{}{}".format(start, anychar_pattern, end)
for frame, protein in self.contig.sixframe():
#search all protein translations for regex
match = re.search(pattern, protein.sequence)
if match:
matches.append(match)
if not matches:
#if there are no matches, try looking for unknown bases as well
start= "({}|J)({}|J)({}|J)".format(*start)
end = "({}|J)({}|J)({}|J)".format(*end)
attempt += 1
if attempt == 2:
raise RuntimeError("Cannot match HSP to Contig {}".format(self.contig.name))
# and assuming there is only one match, the index is correct
match = matches[0].group()
if len(match) != len(aa):
# sanity check
raise RuntimeError("Cannot match HSP to Contig {}".format(self.contig.name))
else:
while 'X' in aa:
#search for X or runs of X
xs = re.search(r'X+', aa)
#indices of xs matched above
s = xs.start()
e = xs.end()
#portion of contig to use to replace Xs
subst = match[s:e]
#only replace first instance in each loop
aa = re.sub(xs.group(), subst, aa, count=1)
#replace J (from unknown translations) with X as these are genuine unknowns
aa = re.sub(r'J', r'X', aa)
return aa
def printer(self):
"""Writes hsp information to global log File
"""
print >> logfile, "Sequence: {}".format(self.nt_seq)
print >> logfile, "Start: {}, End: {}".format(self.start, self.end)
print >> logfile, "Protein: {}".format(self.aa_seq)
class EmptyTilePath(Tile_Path):
def __init__(self, contig, protein=False, no_hit=0):
"""Initialise an empty tile path - there were no annotations for this
contig.
Parameters:
___________
contig : DNASequence object
protein : bool. output amino acid sequence
no_hit : How to handle sequences with no annotation
0=Ignore
1=Output longest ORF
2=Output frame 1
"""
self.contig = contig
self.tile = False
self.start = 0
self.strand = 1
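# Illustrative sketch (standalone, not from the original module): the frame
# correction performed by correct_frame above trims a gap between an HSP and
# the tile down to the nearest multiple of three, so the joined nucleotides
# stay in the same reading frame.
def trim_to_frame(distance):
    """Trim a nucleotide gap down to the nearest multiple of 3."""
    difference = distance
    while difference % 3 != 0:
        difference -= 1
    return difference

assert trim_to_frame(7) == 6 and trim_to_frame(3) == 3 and trim_to_frame(0) == 0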
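# Illustrative sketch (standalone): _getCorrectedAA above recovers residues
# masked by BLASTX low-complexity filtering by turning X (and *) into the
# regex wildcard '.' and searching each six-frame translation for a match of
# the same length, anchored on the first and last three residues.
import re

def mask_to_pattern(aa):
    """Build a regex matching the unmasked residues of a query sequence."""
    aa = aa.replace('-', '')               # drop alignment gaps
    start = re.sub(r'[Xx*]', '.', aa[:3])  # anchor on the first 3 residues
    end = re.sub(r'[Xx*]', '.', aa[-3:])   # ... and the last 3 residues
    return start + '.' * (len(aa) - 6) + end

assert mask_to_pattern('FGTPPXPYII') == 'FGT....YII'
assert re.search(mask_to_pattern('FGTPPXPYII'), 'MKFGTPPQPYIIL')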
import random
import os
import time
import logging
# Hide the pygame support prompt
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1"
import pygame
from pygame import Vector2
import sprites
# Using the C API to change the app ModelID
# This changes the taskbar icon from the python one to the game-specific one
import ctypes
appid = "pthompson.trashtosser.1.0" # Arbitrary application ID string
if os.name == "nt": # shell32 is only available on Windows
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
pygame.init()
pygame.font.init()
font = lambda size: pygame.font.Font(pygame.font.get_default_font(), size) # Font helper function that returns font of given size
# Game window dimensions
SIZE_X = 1120
SIZE_Y = 560
# Constant rate of gravity which is applied by moving the trash object down each frame
GRAVITY = 1
FRAMECAP = 60 # Target framerate
CHECK_VECTOR = Vector2(1,0) # Horizontal vector so overall angle of objects can be measured
class GameObject(pygame.sprite.Sprite):
"""
Base object template that will be inherited and extended by each specific object
Attributes:
game: trashtosser.Game
The game object this object is a child of, used to get important variables such as the screen
pos: Vector2
Top left corner position vector
rect: pygame.Rect
The local rectangle representing the sprite
global_rect: pygame.Rect
Global rect offset by global coordinates used for collision and other inter-object interactions
Methods:
update(dt: float, keys: list)
A function to process game logic taking deltatime and the currently pressed keys as arguments
Has no effects on the base class but will be overridden in each game object
draw()
Draws the object's sprite to the screen, calculating rotations and position
"""
def __init__(self, parent, x, y, image=None, angle=0):
super(GameObject, self).__init__() # Initialise the parent object
self.game = parent
self.pos = Vector2(x,y)
self.angle = angle
self.image = image or pygame.Surface((0, 0))
self.rect = self.image.get_rect()
self.global_rect = pygame.Rect(self.rect.x+x,self.rect.y+y,self.rect.w,self.rect.h) # Add the object's position to its local rect
def update(self, dt, keys):
"""
Class-specific method to update position, check collision, etc.. that is run every frame
On the base object it does nothing but should be overwritten for each object
It exists here so an error is not raised if an object does not have it overwritten for whatever reason
"""
pass
def draw(self):
"""
Rotates the sprite by appropriate angle and then draws to the game screen
"""
if self.angle == 0: # Skip rotation calculations if there is no rotation
pos = (self.pos.x,self.pos.y)
image = self.image
else:
width,height = self.image.get_size()
# Calculating and rotating the bounding boxes for the new sprite
boundary = [
Vector2(0,0),
Vector2(width,0),
Vector2(width,-height),
Vector2(0,-height)
]
boundary = [vector.rotate(self.angle) for vector in boundary]
min_x = min(boundary, key=lambda vec: vec.x)[0] # Getting the lowest x value of each vector in the bounding box using an anonymous function as the key
max_y = max(boundary, key=lambda vec: vec.y)[1] # Same thing but finding the max y value
center = Vector2(
(width/2),
-(height/2)
)
rotation_offset = center.rotate(self.angle) - center
pos = (
self.pos.x + min_x - rotation_offset.x,
self.pos.y + max_y - rotation_offset.y
)
# Rotating the image before blitting it to the screen below
image = pygame.transform.rotate(self.image, self.angle)
# Reassigning rect and global_rect with new positions
self.rect = image.get_rect()
self.global_rect = pygame.Rect(
pos[0], pos[1],
self.rect.w, self.rect.h
)
# Rendering to screen the newly calculated image and position
self.game.surface.blit(image, pos)
class TrashObject(GameObject):
"""
Object representing a piece of trash and inheriting the GameObject class
Attributes:
pos: Vector2
The position vector of the object's top left corner relative to the origin
vel: Vector2
A relative vector representing the velocity. Coordinates are relative to TrashObject.pos as they are added each frame
initial_length: float
The initial magnitude of the velocity vector, used to calculate the scaling of the power used to launch the object
TRAJECTORY_BALLS: int
A constant value giving the number of trajectory prediction orbs to calculate and draw
trajectory: list[Vector2], old_trajectory: list[Vector2]
Lists of position vectors representing the predicted trajectory of the object
The trajectory for the last launch is also stored and shown in a lighter colour so the player can compare them
collider: pygame.Rect
A pygame rectangle effectively equal to the global rect plus the objects velocity. Used to calculate collisions with obstacles
Methods:
draw()
Overrides the GameObject draw function as this trash object also requires the trajectories to be drawn.
Calls super().draw() at the end of the function so the sprite is still drawn as normal
update(dt: float, keys: list)
Overrides the GameObject update function with logic to calculate position, velocity, gravity, and aiming
flipy()
Flip the velocity vertically. Used in collision and bouncing calculations
Also damp the velocity (y by 20%, x by roughly a third) as objects lose speed when they bounce
"""
def __init__(self, parent, x, y, vx, vy, type):
super().__init__(parent, x, y, sprites.trash(type))
self.type = type
# Position and velocity vectors
self.pos = Vector2(x,y)
self.vel = Vector2(vx,vy)
self.paused = True
self.initial_length = self.vel.magnitude()
# Scalar for the object so the launch power can be changed
self.power = 1
self.TRAJECTORY_BALLS = 15 # Number of trajectory prediction orbs to display
self.trajectory = []
self.old_trajectory = []
self.collider = pygame.Rect(self.global_rect.x + self.vel.x,
self.global_rect.y + self.vel.y,
self.global_rect.w,
self.global_rect.h) # Collision rect made by adding the velocity to the global rect
def draw(self):
for dot in self.old_trajectory:
pygame.draw.circle(self.game.surface,(128,128,255), dot[0], dot[1])
if self.paused:
for dot in self.trajectory:
pygame.draw.circle(self.game.surface,(255,255,255),dot[0],dot[1])
super().draw() # Calling parent draw to draw sprite
def update(self, dt, keys):
# Set the velocity to zero if the object is outside the game frame so the game can reset quicker
if 0 > self.pos.x or self.pos.x > SIZE_X:
self.vel = Vector2(0,0)
# Calculate the trajectory indicators
if self.paused:
self.trajectory = []
for i in range(self.TRAJECTORY_BALLS):
# Some long calculations for finding the predicted location for each of the trajectory orbs
# Involves adding the number of the ball multiplied by the velocity plus the gravity to the position of the ball
# The size of the orb is also calculated using the number of the ball (5-i/4)
self.trajectory.append((self.pos+i*Vector2(self.vel.x,self.vel.y+0.5*GRAVITY*i)+Vector2(self.global_rect.w/2,self.global_rect.h/2), 5-i/4))
# Only calculate if the object is moving and not paused
if not self.paused and self.vel != Vector2(0,0):
self.angle -= 1 if self.vel.x > 0 else -1
# Apply velocity
self.pos += Vector2(self.vel.x,self.vel.y) * dt
# Apply gravity
self.vel.y += GRAVITY * dt
# If the object is off screen
if self.pos.y >= (SIZE_Y-self.rect.h) - (self.vel.y * dt):
# If the object is going very slow then don't flip it and set the velocity to 0 instead so it doesn't jitter up and down
if abs(self.vel.x) < 5 and abs(self.vel.y) < 5 and self.pos.y > (SIZE_Y-100):
self.vel = Vector2(0, 0)
self.pos.y = SIZE_Y-self.rect.h
# Otherwise, flip the y velocity and damp it, as objects lose speed when they bounce
else:
self.flipy()
# Make the modifier lower if shift is held down so the aiming can be more fine-tuned
if keys[pygame.K_LSHIFT]:
mod = 0.3
else:
mod = 1
if self.paused:
# Rotate with left and right arrows and restricting rotation to 180 degrees to the right
# Rotating using deltatime so the rotation is framerate independent
if keys[pygame.K_LEFT]:
if self.vel.angle_to(CHECK_VECTOR) < 90:
self.vel.rotate_ip(-mod * dt)
if keys[pygame.K_RIGHT]:
if self.vel.angle_to(CHECK_VECTOR) > -90:
self.vel.rotate_ip(mod * dt)
# Change power with up and down by scaling the velocity magnitude by the initial length times the scalar
if keys[pygame.K_UP]:
if self.power < 3:
self.power += mod * 0.02 * dt
self.vel.scale_to_length(self.initial_length * self.power)
if keys[pygame.K_DOWN]:
if self.power > 0.5:
self.power -= mod * 0.02 * dt
self.vel.scale_to_length(self.initial_length * self.power)
# Recalculate the collider rect each update
self.collider = pygame.Rect(self.pos.x + self.vel.x,
self.pos.y + self.vel.y,
self.global_rect.w,
self.global_rect.h)
def reset(self):
# Reset all attributes of the object and generate a new type
self.pos = Vector2(20, 400)
self.vel = Vector2(10, -10)
self.paused = True
self.power = 1
self.angle = 0
self.type = random.randint(0,2)
self.image = sprites.trash(self.type)
# Only flip if the object is moving
def flipy(self):
if self.vel != Vector2(0,0):
self.vel.y *= -1
self.vel.y *= 0.8
self.vel.x *= 0.66
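# A minimal standalone sketch (not part of the game) checking the closed-form
# trajectory prediction used in update() against explicit integration.
# The gravity value and step count below are assumed example values.
def _check_trajectory_prediction(steps=10, gravity=0.5):
    from pygame.math import Vector2
    pos, vel = Vector2(20, 400), Vector2(10, -10)
    # closed form for step i: pos + i*vel + (0, 0.5*g*i*i)
    predicted = pos + steps * Vector2(vel.x, vel.y + 0.5 * gravity * steps)
    # explicit integration with dt = 1 (position updated before velocity)
    p, v = Vector2(pos), Vector2(vel)
    for _ in range(steps):
        p += v
        v.y += gravity
    # the two agree up to half a gravity step (g*steps/2) in y, an artifact of
    # the integration order; close enough for an aiming indicator
    return predicted, p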
class Bin(GameObject):
"""
Object representing a rubbish bin, the goal for the player to shoot the trash object into
Attributes:
pos: Vector2
            Position vector of the bin
self.filename = filename
f = open(filename, 'rb')
# read the fileheader
self.dic = fileheader2dic(get_fileheader(f))
if self.dic["naxis"] != 2:
raise Exception("file is not a 2D Sparky file")
# read in the axisheaders
self.dic["w1"] = axisheader2dic(get_axisheader(f))
self.dic["w2"] = axisheader2dic(get_axisheader(f))
f.close()
# sizes
self.lenY = self.dic["w1"]["npoints"]
self.lenX = self.dic["w2"]["npoints"]
# tile sizes
self.lentY = self.dic["w1"]["bsize"]
self.lentX = self.dic["w2"]["bsize"]
# check order
if order is None:
order = (0, 1)
# finalize
self.dtype = np.dtype("float32")
self.order = order
self.fshape = (self.lenY, self.lenX)
self.__setdimandshape__()
def __fcopy__(self, order):
"""
Create a copy
"""
n = sparky_2d(self.filename, order)
return n
def __fgetitem__(self, slices):
"""
Returns ndarray of selected values.
(sY, sX) is a well formatted tuple of slices
"""
sY, sX = slices
f = open(self.filename, 'rb')
# print(sY,sX)
gY = range(self.lenY)[sY] # list of values to take in Y
gX = range(self.lenX)[sX] # list of values to take in X
# tiles to get in each dim to read
# Y tile to read
gtY = set([int(np.floor(i / self.lentY)) for i in gY])
# X tile to read
gtX = set([int(np.floor(i / self.lentX)) for i in gX])
        # create an empty output array
out = np.empty((len(gY), len(gX)), dtype=self.dtype)
for iY in gtY: # loop over Y tiles to get
for iX in gtX: # loop over X tiles to get
# get the tile and reshape it
ntile = int(iY * np.ceil(self.lenX / self.lentX) + iX)
tile = get_tilen(f, ntile, (self.lentX, self.lentY))
tile = tile.reshape(self.lentY, self.lentX)
# tile minimum and max values for each dim
minX = iX * self.lentX
maxX = (iX + 1) * self.lentX
minY = iY * self.lentY
maxY = (iY + 1) * self.lentY
                # determine which elements are needed from this tile
XinX = [i for i in gX if maxX > i >= minX] # values in gX
XinT = [i - minX for i in XinX] # tile index values
XinO = [gX.index(i) for i in XinX] # output indexes
                YinY = [i for i in gY if maxY > i >= minY] # values in gY
YinT = [i - minY for i in YinY] # tile index values
YinO = [gY.index(i) for i in YinY] # output indexes
# take elements from the tile
ctile = tile.take(XinT, axis=1).take(YinT, axis=0)
# DEBUGGING info
# print("-------------------------------")
# print("iX:",iX,"iY:",iY,"ntile:",ntile)
# print("tile.shape",tile.shape)
# print("minX:",minX,"maxX",maxX)
# print("minY:",minY,"maxY",maxY)
# print("XinX",XinX)
# print("XinT",XinT)
# print("XinO",XinO)
# print("YinY",YinY)
# print("YinT",YinT)
# print("YinO",YinO)
# put the cut tile to the out array (uses some fancy indexing)
out[np.ix_(YinO, XinO)] = ctile
f.close()
return out
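# Illustrative sketch (hypothetical sizes, not from the original file): how the
# row-major tile numbering above maps a (tile-row, tile-column) pair to the
# flat tile index passed to get_tilen().
def _tile_index_example():
    lenX, lentX = 2048, 128                     # assumed spectrum and tile widths
    tiles_per_row = int(np.ceil(lenX / lentX))  # 16 tiles across
    iY, iX = 3, 5                               # tile coordinates
    return iY * tiles_per_row + iX              # 3*16 + 5 = 53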
class sparky_3d(fileiobase.data_nd):
"""
Emulates a ndarray object without loading data into memory for low memory
read of 3D Sparky files.
* slicing operations return ndarray objects.
* can iterate over with expected results.
    * transpose and swapaxes methods create new objects with correct axes
ordering.
* has ndim, shape, and dtype attributes.
Parameters
----------
filename : str
Filename of 3D Sparky file.
order : tuple
        Ordering of axes against file. None is equivalent to (0, 1, 2).
"""
def __init__(self, filename, order=None):
"""
Create and set up object
"""
# open the file
self.filename = filename
f = open(filename, 'rb')
# read the fileheader
self.dic = fileheader2dic(get_fileheader(f))
if self.dic["naxis"] != 3:
            raise Exception("file is not a 3D Sparky file")
# read in the axisheaders
self.dic["w1"] = axisheader2dic(get_axisheader(f))
self.dic["w2"] = axisheader2dic(get_axisheader(f))
self.dic["w3"] = axisheader2dic(get_axisheader(f))
f.close()
# sizes
self.lenZ = self.dic["w1"]["npoints"]
self.lenY = self.dic["w2"]["npoints"]
self.lenX = self.dic["w3"]["npoints"]
# tile sizes
self.lentZ = self.dic["w1"]["bsize"]
self.lentY = self.dic["w2"]["bsize"]
self.lentX = self.dic["w3"]["bsize"]
# check order
if order is None:
order = (0, 1, 2)
# finalize
self.dtype = np.dtype("float32")
self.order = order
self.fshape = (self.lenZ, self.lenY, self.lenX)
self.__setdimandshape__()
def __fcopy__(self, order):
"""
Create a copy
"""
n = sparky_3d(self.filename, order)
return n
def __fgetitem__(self, slices):
"""
Returns ndarray of selected values.
        (sZ, sY, sX) is a well formatted tuple of slices
"""
sZ, sY, sX = slices
f = open(self.filename, 'rb')
gZ = range(self.lenZ)[sZ] # list of values to take in Z
gY = range(self.lenY)[sY] # list of values to take in Y
gX = range(self.lenX)[sX] # list of values to take in X
# tiles to get in each dim to read
# Z tiles
gtZ = set([int(np.floor(float(i) / self.lentZ)) for i in gZ])
# Y tiles
gtY = set([int(np.floor(float(i) / self.lentY)) for i in gY])
# X tiles
gtX = set([int(np.floor(float(i) / self.lentX)) for i in gX])
# total tiles in each dim
ttX = int(np.ceil(self.lenX / float(self.lentX))) # total tiles in X
ttY = int(np.ceil(self.lenY / float(self.lentY))) # total tiles in Y
ttZ = int(np.ceil(self.lenZ / float(self.lentZ))) # total tiles in Z
tile_tup = (self.lentZ, self.lentY, self.lentX)
        # create an empty output array
out = np.empty((len(gZ), len(gY), len(gX)), dtype=self.dtype)
for iZ in gtZ: # loop over Z tiles to get
for iY in gtY: # loop over Y tiles to get
for iX in gtX: # loop over X tiles to get
# get the tile and reshape it
ntile = iZ * ttX * ttY + iY * ttX + iX
tile = get_tilen(f, ntile, tile_tup)
tile = tile.reshape(tile_tup)
# tile minimum and max values for each dim
minX = iX * self.lentX
maxX = (iX + 1) * self.lentX
minY = iY * self.lentY
maxY = (iY + 1) * self.lentY
minZ = iZ * self.lentZ
maxZ = (iZ + 1) * self.lentZ
                    # determine which elements are needed from this tile
XinX = [i for i in gX if maxX > i >= minX] # values in gX
XinT = [i - minX for i in XinX] # tile index values
XinO = [gX.index(i) for i in XinX] # output indexes
                    YinY = [i for i in gY if maxY > i >= minY] # values in gY
YinT = [i - minY for i in YinY] # tile index values
YinO = [gY.index(i) for i in YinY] # output indexes
                    ZinZ = [i for i in gZ if maxZ > i >= minZ] # values in gZ
ZinT = [i - minZ for i in ZinZ] # tile index values
ZinO = [gZ.index(i) for i in ZinZ] # output indexes
# take elements from the tile
ctile = tile.take(XinT, axis=2).take(YinT, axis=1)
ctile = ctile.take(ZinT, axis=0)
# DEBUGGING info
# print("-------------------------------")
# print("iX:",iX,"iY:",iY,"iZ:",iZ,"ntile:",ntile)
# print("ttX:",ttX,"ttY:",ttY,"ttZ",ttZ)
# print("tile.shape",tile.shape)
# print("minX:",minX,"maxX",maxX)
# print("minY:",minY,"maxY",maxY)
# print("minZ:",minZ,"maxZ",maxZ)
# print("XinX",XinX)
# print("XinT",XinT)
# print("XinO",XinO)
# print("YinY",YinY)
# print("YinT",YinT)
# print("YinO",YinO)
# print("ZinZ",ZinZ)
# print("ZinT",ZinT)
# print("ZinO",ZinO)
# put the cut tile to the out array
out[np.ix_(ZinO, YinO, XinO)] = ctile
f.close()
return out
# tile and data get/put functions
def get_tilen(f, n_tile, tw_tuple):
"""
Read a tile from a Sparky file object.
Parameters
----------
f : file object
Open file object pointing to a Sparky file.
n_tile : int
Tile number to read
tw_tuple : tuple of ints
Tile size
Returns
-------
tile : ndarray
Tile of NMR data. Data is returned as a 1D array.
Notes
-----
    The current file position is lost. It can be stored before calling if the
    position is needed later.
"""
    # determine the size of the tile in bytes
tsize = 4
for i in tw_tuple:
tsize = tsize * i
# seek to the beginning of the tile
f.seek(int(180 + 128 * len(tw_tuple) + n_tile * tsize))
return np.frombuffer(f.read(tsize), dtype='>f4')
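# Worked example (assumed sizes) of the seek offset computed in get_tilen():
# a Sparky file starts with a 180-byte file header plus one 128-byte axis
# header per dimension, followed by tiles of big-endian float32 values.
# For a 2D file with 128x128 tiles, tile number 3 starts at byte
# 180 + 128*2 + 3 * (4*128*128) = 436 + 196608 = 197044.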
def get_tile(f, num_points):
"""
Read the next tile from a Sparky file object.
Parameters
----------
f : file object
Open file object pointing to a Sparky file.
num_points : int
Number of points in the tile.
Returns
-------
tile : ndarray
Tile of NMR data. Data is returned as a 1D array.
"""
bsize = num_points * 4 # size in bytes
return np.frombuffer(f.read(bsize), dtype='>f4')
def put_tile(f, tile):
"""
Put a tile to a Sparky file object.
Parameters
----------
f : file object
        Open file object pointing to a Sparky file.
models.ForeignKey(
'GenomicDataset')
display_name = models.CharField(
max_length=128)
count_matrix = models.ForeignKey(
'FeatureListCountMatrix',
null=True,
help_text='Matrix of read coverage over genomic features')
created = models.DateTimeField(
auto_now_add=True)
last_updated = models.DateTimeField(
auto_now=True)
class Meta:
verbose_name_plural = 'Analysis datasets'
class GenomicBinSettings(models.Model):
ANCHOR_START = 0
ANCHOR_CENTER = 1
ANCHOR_END = 2
ANCHOR_CHOICES = (
(ANCHOR_START, 'start'),
(ANCHOR_CENTER, 'center'),
(ANCHOR_END, 'end'),
)
anchor = models.PositiveSmallIntegerField(
choices=ANCHOR_CHOICES,
default=ANCHOR_CENTER,
help_text='Where to center analysis window relative to BED range')
bin_start = models.IntegerField(
default=-2500,
help_text='Distance from anchor to start designating bins')
bin_number = models.PositiveIntegerField(
default=50,
validators=[MinValueValidator(50), MaxValueValidator(250)],
help_text='Number of bins to use in search window')
bin_size = models.PositiveIntegerField(
default=100,
validators=[MinValueValidator(1)],
help_text='Size of bins to use in search window')
class Meta:
abstract = True
class Analysis(ValidationMixin, GenomicBinSettings):
objects = managers.AnalysisManager()
UPLOAD_TO = 'analysis/'
owner = models.ForeignKey(
settings.AUTH_USER_MODEL)
name = models.CharField(
max_length=128)
slug = models.CharField(
max_length=128)
description = models.TextField(
blank=True)
datasets = models.ManyToManyField(
GenomicDataset,
through=AnalysisDatasets,
through_fields=('analysis', 'dataset'))
genome_assembly = models.ForeignKey(
GenomeAssembly)
feature_list = models.ForeignKey(
FeatureList)
sort_vector = models.ForeignKey(
SortVector,
blank=True,
null=True)
validated = models.BooleanField(
default=False)
validation_errors = models.TextField(
blank=True)
validation_warnings = models.TextField(
blank=True)
start_time = models.DateTimeField(
null=True)
end_time = models.DateTimeField(
null=True)
uuid = models.UUIDField(
default=uuid.uuid4,
editable=False)
public = models.BooleanField(
default=False)
output = models.FileField(
upload_to=UPLOAD_TO,
max_length=256,
blank=True,
null=True)
created = models.DateTimeField(
auto_now_add=True)
last_updated = models.DateTimeField(
auto_now=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def validate(self):
validator = validators.AnalysisValidator(
bin_anchor=self.get_anchor_display(),
bin_start=self.bin_start,
bin_number=self.bin_number,
bin_size=self.bin_size,
feature_bed=self.feature_list.dataset.path,
chrom_sizes=self.genome_assembly.chromosome_size_file,
stranded_bed=self.feature_list.stranded,
)
validator.validate()
return validator.is_valid, validator.display_errors()
def get_absolute_url(self):
return reverse('analysis:analysis',
args=[self.pk, self.slug])
def get_execute_url(self):
return reverse('analysis:analysis_execute',
args=[self.pk, self.slug])
def get_visuals_url(self):
return reverse('analysis:analysis_visual',
args=[self.pk, self.slug])
def get_form_cancel_url(self):
if self.id:
return self.get_absolute_url()
else:
return reverse('analysis:dashboard')
def get_update_url(self):
return reverse('analysis:analysis_update',
args=[self.pk, self.slug])
def get_delete_url(self):
return reverse('analysis:analysis_delete',
args=[self.pk, self.slug])
def get_zip_url(self):
return reverse('analysis:analysis_zip',
args=[self.pk, self.slug])
def is_reset_required(self, ids):
"""
Determine if analysis reset is required (requires re-computation).
If certain settings have changed, reset validation and output results.
This method should be called from a changed form-instance, before
saving.
"""
formObj = self
id_ = formObj.id
reset = False
if id_ is None:
reset = True
else:
dbObj = self.__class__.objects.get(id=id_)
for fld in [
'anchor',
'bin_start',
'bin_number',
'bin_size',
'genome_assembly',
'feature_list_id',
'sort_vector_id',
]:
if getattr(dbObj, fld) != getattr(formObj, fld):
reset = True
break
dbIds = set(dbObj.analysisdatasets_set.values_list('dataset_id', flat=True))
formIds = set(ids)
if dbIds != formIds:
reset = True
logger.info('Analysis reset required: %s' % reset)
return reset
def reset_analysis_object(self):
formObj = self
formObj.validated = False
formObj.validation_errors = ''
formObj.validation_warnings = ''
formObj.output = None
formObj.start_time = None
formObj.end_time = None
cache.delete(self.output_cache_key)
def execute_time_estimate(self):
# estimate execution time, in seconds
n = float(self.datasets.count())
workers = 10
base = 300
matrices = math.ceil(n / workers) * 90
agg = 120 + math.log10(n)**2 * 60
return base + matrices + agg
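        # Worked example (illustrative numbers): with n = 20 datasets and 10
        # workers, the estimate is 300 + ceil(20/10)*90 + 120 + log10(20)**2*60
        # = 300 + 180 + 120 + ~102, roughly 702 seconds (about 12 minutes).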
@property
def user_datasets(self):
return UserDataset.objects.filter(id__in=self.datasets.values_list('id', flat=True))
@property
def encode_datasets(self):
return EncodeDataset.objects.filter(id__in=self.datasets.values_list('id', flat=True))
@property
def analysis_user_datasets(self):
return self.analysisdatasets_set.filter(dataset__in=self.user_datasets)
@property
def analysis_encode_datasets(self):
return self.analysisdatasets_set.filter(dataset__in=self.encode_datasets)
def get_form_datasets(self):
uds = list(self.analysis_user_datasets.values('dataset_id', 'display_name'))
eds = list(self.analysis_encode_datasets.values('dataset_id', 'display_name'))
for ds in itertools.chain(uds, eds):
ds['dataset'] = ds['dataset_id']
del ds['dataset_id']
return json.dumps({
"userDatasets": uds,
"encodeDatasets": eds,
})
def to_dict(self):
# Useful for debugging
d = model_to_dict(self)
d.pop('datasets')
d.pop('output')
d['feature_list_path'] = self.feature_list.dataset.path
if self.sort_vector:
d['sort_vector_path'] = self.sort_vector.dataset.path
d['user_datasets'] = [ds.to_dict() for ds in self.user_datasets]
d['encode_datasets'] = [ds.to_dict() for ds in self.encode_datasets]
return d
class Meta:
verbose_name_plural = 'Analyses'
def user_can_view(self, user):
return self.public or self.owner == user or user.is_staff
def user_can_edit(self, user):
return self.owner == user or user.is_staff
def get_flcm_ids(self):
return list(self.analysisdatasets_set.values_list('count_matrix', flat=True))
@property
def is_ready_to_run(self):
return self.validated and not self.is_running and not self.is_complete
@property
def is_running(self):
return self.start_time and not self.end_time
@property
def is_complete(self):
return self.start_time is not None and self.end_time is not None
@property
def execute_task_id(self):
return 'analysis-execute-{}'.format(self.id)
def execute(self, silent=False):
# intentionally don't fire save signal
self.__class__.objects\
.filter(id=self.id)\
.update(
start_time=now(),
end_time=None,
)
tasks.execute_analysis.apply_async(
args=[self.id, silent], task_id=self.execute_task_id)
def create_matrix_list(self):
return [
[ads.count_matrix.id, ads.display_name, ads.count_matrix.matrix.path]
for ads in self.analysisdatasets_set.all().prefetch_related('count_matrix')
]
def execute_mat2mat(self):
matrix_list = self.create_matrix_list()
sv = None
if self.sort_vector:
sv = self.sort_vector.dataset.path
mm = MatrixByMatrix(
feature_bed=self.feature_list.dataset.path,
matrix_list=matrix_list,
annotation=self.genome_assembly.annotation_file,
window_start=self.bin_start,
bin_number=self.bin_number,
bin_size=self.bin_size,
sort_vector=sv,
)
fn = get_random_filename(os.path.join(settings.MEDIA_ROOT, self.UPLOAD_TO))
mm.writeJson(fn)
return os.path.join(self.UPLOAD_TO, os.path.basename(fn))
@property
def output_cache_key(self):
return 'analysis-%s' % self.id
@property
def output_json(self):
key = self.output_cache_key
obj = cache.get(key)
if not obj:
with open(self.output.path, 'r') as f:
output = json.loads(f.read())
obj = output
cache.set(key, obj)
return obj
@property
def sort_vector_cache_key(self):
return 'analysis-sort-vector-%s' % self.id
@property
def sort_vector_df(self):
sv = None
if self.sort_vector is not None:
key = self.sort_vector_cache_key
sv = cache.get(key)
if sv is None:
sv = pd.read_csv(
self.sort_vector.dataset.path, sep='\t', header=None
)
cache.set(key, sv)
return sv
@property
def matrices(self):
if not self.output:
return False
names = []
ids = []
for row in self.output_json['dsc_full_data']['rows']:
names.append(row['row_name'])
ids.append(row['row_id'])
return {'names': names, 'ids': ids}
def get_fc_vectors_ngs_list(self):
if not self.output:
return False
return self.output_json['fc_vectors']['col_names']
def get_analysis_overview_init(self):
if not self.output:
return False
sv = self.sort_vector_df
if sv is not None:
sv = sv.as_matrix(columns=[1]).flatten()
data = {
'dscRepData': self.output_json['dsc_rep_data'],
'dendrogram': self.output_json['dsc_dendrogram'],
'sort_vector': sv,
}
return data
def get_individual_overview_init(self):
if not self.output:
return False
matrices = self.matrices
sv = None
if self.sort_vector_df is not None:
sv = self.sort_vector_df.to_json()
data = {
'col_names': self.output_json['dsc_full_data']['col_names'],
'matrix_names': matrices['names'],
'matrix_IDs': matrices['ids'],
'sort_vector': sv,
}
return data
def get_feature_clustering_overview_init(self):
centroids = self.output_json['fc_centroids']
upper_quartile = numpy.array(self.output_json['fc_vectors']['q3'],
dtype=numpy.float)
for k in centroids:
for cluster in centroids[k]:
centroids[k][cluster] = numpy.nan_to_num(
numpy.array(centroids[k][cluster], dtype=numpy.float) /
upper_quartile)
data = {
'dendrogram': self.output_json['dsc_dendrogram'],
'matrix_names': self.matrices['names'],
'fcCentroids': centroids,
}
return data
def get_clust_boxplot_values(self, k, col_index):
box_plot_values = dict()
cluster_values = defaultdict(list)
vectors = self.output_json['fc_vectors']['vectors']
for cluster, features in \
self.output_json['fc_clusters'][str(k)].items():
for feature in features:
cluster_values[cluster].append(vectors[feature][col_index])
cluster_values['all'].append(vectors[feature][col_index])
for key, _list in cluster_values.items():
_array = numpy.array(_list, dtype=numpy.float)
q1 = numpy.percentile(_array, 25)
q2 = numpy.percentile(_array, 50)
q3 = numpy.percentile(_array, 75)
iqr = q3 - q1
lower = q1 - 1.5 * iqr
upper = q3 + 1.5 * iqr
outliers = []
_max = float('-inf')
_min = float('inf')
for val in numpy.nditer(_array):
if val < lower or val > upper:
outliers.append(val)
else:
if val > _max:
_max = val
if val < _min:
_min = val
box_plot_values[key] = {
'q1': q1,
'q2': q2,
'q3': q3,
'min': _min,
'max': _max,
'outliers': outliers,
}
clusters = list(sorted(cluster_values.keys()))
p_values = numpy.empty((len(clusters), len(clusters)))
for i, c_1 in enumerate(clusters):
for j, c_2 in enumerate(clusters[i:]):
clust_1 = cluster_values[c_1]
clust_2 = cluster_values[c_2]
try:
statistic, p = stats.mannwhitneyu(
clust_1,
clust_2,
alternative='two-sided'
)
except ValueError:
p = 1
p_values[i][i + j] = p
p_values[i + j][i] = p
mann_whitney_results = {
'clusters': clusters,
'p_values': p_values,
}
return box_plot_values, mann_whitney_results
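        # Example of the Tukey fences used above (toy data): for values
        # [1, 2, 3, 4, 100], q1 = 2, q3 = 4, iqr = 2, so the fences are
        # [-1, 7] and 100 is reported as an outlier.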
def get_dsc_full_row_value(self, row_name):
if not self.output:
return False
i = next(index for (index, d) in
enumerate(self.output_json['dsc_full_data']['rows']) if
d['row_name'] == row_name)
return self.output_json['dsc_full_data']['rows'][i]['row_data']
def get_dsc_name_to_id(self, row_name):
if not self.output:
return False
i = next(index for (index, d) in
enumerate(self.output_json['dsc_full_data']['rows']) if
d['row_name'] == row_name)
return self.output_json['dsc_full_data']['rows'][i]['row_id']
def get_cluster_members(self, k, cluster):
entry_list = []
gene_list = []
# READ FEATURE LIST
feature_to_line = dict()
count = 0
total_valid_lines = BedMatrix.countValidBedLines(self.feature_list.dataset.path)
with open(self.feature_list.dataset.path) as f:
for line in f:
if not BedMatrix.checkHeader(line):
bed_fields = len(line.strip().split())
name = None
if bed_fields >= 4: # Contains name information?
name = line.strip().split()[3]
if name is None or name in BedMatrix.DUMMY_VALUES:
name = BedMatrix.generateFeatureName(
"feature", count, total_valid_lines)
count += 1
feature_to_line[name] = line.strip()
# GET GENE ASSOCIATIONS FROM JSON
if not self.output:
return False
feature_to_gene = self.output_json['feature_to_gene']
# CREATE ENTRY LINES AND GENE LISTS, RETURN ZIPPED
for feature in self.output_json['fc_clusters'][str(k)][str(cluster)]:
entry_list.append(feature_to_line[feature])
gene_list.append(feature_to_gene[feature])
return(zip(entry_list, gene_list))
def get_feature_data(self, feature_name):
if not self.output:
return False
return self.output_json['fc_vectors']['vectors'][feature_name]
def get_k_clust_heatmap(self, k_value, dim_x, dim_y):
fc_vectors = self.output_json['fc_vectors']['vectors']
fc_clusters = self.output_json['fc_clusters'][str(k_value)]
upper_quartile = numpy.array(
self.output_json['fc_vectors']['q3'], dtype=numpy.float)
display_values = []
cluster_sizes = dict()
for cluster in sorted(fc_clusters, key=lambda x: int(x)):
cluster_sizes[cluster] = len(fc_clusters[cluster])
for member in fc_clusters[cluster]:
display_values.append(fc_vectors[member])
display_values = numpy.array(display_values, dtype=numpy.float)
display_values = display_values / upper_quartile
display_values = numpy.nan_to_num(display_values)
ncols = len(display_values[0])
nrows = len(display_values)
if ncols > dim_x:
zoom_x = dim_x / ncols
else:
zoom_x = 1
if nrows > dim_y:
zoom_y = dim_y / nrows
else:
zoom_y = 1
zoomed_data = ndimage.zoom(
display_values, (zoom_y, zoom_x), order=0)
return {
'display_data': zoomed_data,
'cluster_sizes': cluster_sizes,
        }
# keras_cv_attention_models/coco/anchors_func.py (from RishabhSehgal/keras_cv_attention_models)
import tensorflow as tf
from tensorflow.keras import backend as K
def get_feature_sizes(input_shape, pyramid_levels=[3, 7]):
# https://github.com/google/automl/tree/master/efficientdet/utils.py#L509
feature_sizes = [input_shape[:2]]
for _ in range(max(pyramid_levels)):
pre_feat_size = feature_sizes[-1]
feature_sizes.append(((pre_feat_size[0] - 1) // 2 + 1, (pre_feat_size[1] - 1) // 2 + 1)) # ceil mode, like padding="SAME" downsampling
return feature_sizes
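# Example (computed by hand): for a 512x512 input and pyramid_levels=[3, 7],
# the ceil-mode halving above yields
# [(512, 512), (256, 256), (128, 128), (64, 64), (32, 32), (16, 16), (8, 8), (4, 4)],
# so feature_sizes[3] == (64, 64), i.e. stride 8 at pyramid level 3.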
def get_anchors(input_shape=(512, 512, 3), pyramid_levels=[3, 7], aspect_ratios=[1, 2, 0.5], num_scales=3, anchor_scale=4, grid_zero_start=False):
"""
>>> from keras_cv_attention_models.coco import anchors_func
>>> input_shape = [512, 128]
>>> anchors = anchors_func.get_anchors([512, 128], pyramid_levels=[7])
>>> anchors.draw_bboxes(anchors * [512, 128, 512, 128])
grid_zero_start: grid starts from 0, else from strides // 2. False for efficientdet anchors, True for yolo anchors.
"""
# base anchors
scales = [2 ** (ii / num_scales) * anchor_scale for ii in range(num_scales)]
aspect_ratios_tensor = tf.convert_to_tensor(aspect_ratios, dtype="float32")
if len(aspect_ratios_tensor.shape) == 1:
# aspect_ratios = [0.5, 1, 2]
sqrt_ratios = tf.sqrt(aspect_ratios_tensor)
ww_ratios, hh_ratios = sqrt_ratios, 1 / sqrt_ratios
else:
# aspect_ratios = [(1, 1), (1.4, 0.7), (0.7, 1.4)]
ww_ratios, hh_ratios = aspect_ratios_tensor[:, 0], aspect_ratios_tensor[:, 1]
base_anchors_hh = tf.reshape(tf.expand_dims(scales, 1) * tf.expand_dims(hh_ratios, 0), [-1])
base_anchors_ww = tf.reshape(tf.expand_dims(scales, 1) * tf.expand_dims(ww_ratios, 0), [-1])
base_anchors_hh_half, base_anchors_ww_half = base_anchors_hh / 2, base_anchors_ww / 2
base_anchors = tf.stack([base_anchors_hh_half * -1, base_anchors_ww_half * -1, base_anchors_hh_half, base_anchors_ww_half], axis=1)
# base_anchors = tf.gather(base_anchors, [3, 6, 0, 4, 7, 1, 5, 8, 2]) # re-order according to official generated anchors
# make grid
pyramid_levels = list(range(min(pyramid_levels), max(pyramid_levels) + 1))
feature_sizes = get_feature_sizes(input_shape, pyramid_levels)
all_anchors = []
for level in pyramid_levels:
stride_hh, stride_ww = feature_sizes[0][0] / feature_sizes[level][0], feature_sizes[0][1] / feature_sizes[level][1]
top, left = (0, 0) if grid_zero_start else (stride_hh / 2, stride_ww / 2)
hh_centers = tf.range(top, input_shape[0], stride_hh)
ww_centers = tf.range(left, input_shape[1], stride_ww)
ww_grid, hh_grid = tf.meshgrid(ww_centers, hh_centers)
grid = tf.reshape(tf.stack([hh_grid, ww_grid, hh_grid, ww_grid], 2), [-1, 1, 4])
anchors = tf.expand_dims(base_anchors * [stride_hh, stride_ww, stride_hh, stride_ww], 0) + tf.cast(grid, base_anchors.dtype)
anchors = tf.reshape(anchors, [-1, 4])
all_anchors.append(anchors)
all_anchors = tf.concat(all_anchors, axis=0) / [input_shape[0], input_shape[1], input_shape[0], input_shape[1]]
# if width_first:
# all_anchors = tf.gather(all_anchors, [1, 0, 3, 2], axis=-1)
# Save all parameters with anchors, for serialize saving
all_anchors.input_shape, all_anchors.pyramid_levels, all_anchors.aspect_ratios = input_shape, pyramid_levels, aspect_ratios
all_anchors.num_scales, all_anchors.anchor_scale, all_anchors.grid_zero_start = num_scales, anchor_scale, grid_zero_start
return all_anchors
def get_anchor_free_anchors(input_shape=(512, 512, 3), pyramid_levels=[3, 5], grid_zero_start=True):
return get_anchors(input_shape, pyramid_levels, aspect_ratios=[1], num_scales=1, anchor_scale=1, grid_zero_start=grid_zero_start)
def get_yolor_anchors(input_shape=(640, 640), pyramid_levels=[3, 5], offset=0.5):
# assert max(pyramid_levels) - min(pyramid_levels) < 3
# width first to height first
if max(pyramid_levels) - min(pyramid_levels) < 3:
anchor_ratios = tf.convert_to_tensor([[[16.0, 12], [36, 19], [28, 40]], [[75, 36], [55, 76], [146, 72]], [[110, 142], [243, 192], [401, 459]]])
else:
anchor_ratios = tf.convert_to_tensor(
[[[27.0, 19], [40, 44], [94, 38]], [[68, 96], [152, 86], [137, 180]], [[301, 140], [264, 303], [542, 238]], [[615, 436], [380, 739], [792, 925]]]
)
pyramid_levels = list(range(min(pyramid_levels), max(pyramid_levels) + 1))
feature_sizes = get_feature_sizes(input_shape, pyramid_levels)
all_anchors = []
for level, anchor_ratio in zip(pyramid_levels, anchor_ratios):
stride_hh, stride_ww = feature_sizes[0][0] / feature_sizes[level][0], feature_sizes[0][1] / feature_sizes[level][1]
# hh_grid, ww_grid = tf.meshgrid(tf.range(feature_sizes[level][0]), tf.range(feature_sizes[level][1]))
ww_grid, hh_grid = tf.meshgrid(tf.range(feature_sizes[level][1]), tf.range(feature_sizes[level][0]))
grid = tf.cast(tf.stack([hh_grid, ww_grid], 2), "float32") - offset
grid = tf.reshape(grid, [-1, 1, 2]) # [1, level_feature_sizes, 2]
cur_base_anchors = anchor_ratio[tf.newaxis, :, :] # [num_anchors, 1, 2]
grid_nd = tf.repeat(grid, cur_base_anchors.shape[1], axis=1) * [stride_hh, stride_ww]
cur_base_anchors_nd = tf.repeat(cur_base_anchors, grid.shape[0], axis=0)
stride_nd = tf.zeros_like(grid_nd) + [stride_hh, stride_ww]
# yield grid_nd, cur_base_anchors_nd, stride_nd
anchors = tf.concat([grid_nd, cur_base_anchors_nd, stride_nd], axis=-1)
all_anchors.append(tf.reshape(anchors, [-1, 6]))
all_anchors = tf.concat(all_anchors, axis=0) / ([input_shape[0], input_shape[1]] * 3)
return all_anchors
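# Layout note (from the code above): each yolor anchor row holds six values,
# [grid_y, grid_x, base_anchor_h, base_anchor_w, stride_h, stride_w], all
# normalized by the input height/width; decode_bboxes() below keys on this
# six-column shape to select the yolor decoding branch.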
def get_pyramid_levels_by_anchors(input_shape, total_anchors, num_anchors, pyramid_levels_min=3):
feature_sizes = get_feature_sizes(input_shape, [pyramid_levels_min, pyramid_levels_min + 10])
feature_sizes = tf.convert_to_tensor(feature_sizes, dtype="float32")
pyramid_levels = []
level = pyramid_levels_min
total_anchors /= num_anchors
while total_anchors > 0:
pyramid_levels.append(level)
stride_hh, stride_ww = feature_sizes[0][0] / feature_sizes[level][0], feature_sizes[0][1] / feature_sizes[level][1]
cur_num_anchors = tf.math.ceil(input_shape[0] / stride_hh) * tf.math.ceil(input_shape[1] / stride_ww)
total_anchors -= int(cur_num_anchors)
level += 1
return pyramid_levels
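# Example (hand-computed): a 512x512 input with 9 anchors per location over
# pyramid levels 3..7 has (64*64 + 32*32 + 16*16 + 8*8 + 4*4) * 9 = 49104
# anchors, so get_pyramid_levels_by_anchors([512, 512], 49104, 9) returns
# [3, 4, 5, 6, 7].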
def iou_nd(bboxes, anchors):
# bboxes: [[top, left, bottom, right]], anchors: [[top, left, bottom, right]]
anchors_nd, bboxes_nd = tf.expand_dims(anchors, 0), tf.expand_dims(bboxes, 1)
inter_top_left = tf.maximum(anchors_nd[:, :, :2], bboxes_nd[:, :, :2])
inter_bottom_right = tf.minimum(anchors_nd[:, :, 2:], bboxes_nd[:, :, 2:])
inter_hw = tf.maximum(inter_bottom_right - inter_top_left, 0)
inter_area = inter_hw[:, :, 0] * inter_hw[:, :, 1]
bboxes_area = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
anchors_area = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
union_area = (tf.expand_dims(bboxes_area, 1) + tf.expand_dims(anchors_area, 0)) - inter_area
return inter_area / union_area
def corners_to_center_yxhw_nd(ss):
""" input: [top, left, bottom, right], output: [center_h, center_w], [height, width] """
return (ss[:, :2] + ss[:, 2:]) * 0.5, ss[:, 2:] - ss[:, :2]
def center_yxhw_to_corners_nd(ss):
""" input: [center_h, center_w, height, width], output: [top, left, bottom, right] """
top_left = ss[:, :2] - ss[:, 2:] * 0.5
bottom_right = top_left + ss[:, 2:]
return tf.concat([top_left, bottom_right], axis=-1)
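# Quick self-consistency sketch (toy boxes, not from the original file): the
# two converters above are exact inverses, and iou_nd() returns a
# [num_bboxes, num_anchors] matrix.
def _corner_center_roundtrip_example():
    boxes = tf.constant([[0.0, 0.0, 2.0, 2.0], [1.0, 1.0, 3.0, 3.0]])
    center, hw = corners_to_center_yxhw_nd(boxes)
    restored = center_yxhw_to_corners_nd(tf.concat([center, hw], axis=-1))
    # restored == boxes; the IoU matrix has 1.0 on the diagonal and 1/7 off it
    return restored, iou_nd(boxes, boxes)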
def assign_anchor_classes_by_iou_with_bboxes(bbox_labels, anchors, ignore_threshold=0.4, overlap_threshold=0.5):
num_anchors = anchors.shape[0]
bbox_labels = tf.gather_nd(bbox_labels, tf.where(bbox_labels[:, -1] > 0))
bboxes, labels = bbox_labels[:, :4], bbox_labels[:, 4]
anchor_ious = iou_nd(bboxes, anchors) # [num_bboxes, num_anchors]
anchor_best_iou_ids = tf.argmax(anchor_ious, axis=0) # [num_anchors]
# anchor_best_ious = tf.gather_nd(anchor_ious, tf.stack([anchor_best_iou_ids, tf.range(num_anchors, dtype=anchor_best_iou_ids.dtype)], axis=-1))
anchor_best_ious = tf.reduce_max(anchor_ious, axis=0) # This faster, [num_anchors]
matched_idxes = tf.where(anchor_best_ious > overlap_threshold)[:, 0]
    matched_idxes = tf.unique(tf.concat([matched_idxes, tf.argmax(anchor_ious, axis=-1)], axis=0))[0] # Ensure at least one anchor is selected for each bbox
matched_idxes_nd = tf.expand_dims(matched_idxes, -1)
best_match_indxes = tf.gather(anchor_best_iou_ids, matched_idxes)
best_match_labels = tf.gather(labels, best_match_indxes)
# Mark anchors classes, iou < ignore_threshold as 0, ignore_threshold < iou < overlap_threshold as -1
anchor_classes = tf.where(anchor_best_ious > ignore_threshold, tf.cast(-1, bbox_labels.dtype), tf.cast(0, bbox_labels.dtype))
# Mark matched anchors classes, iou > overlap_threshold as actual labels
# anchor_classes = tf.where(anchor_best_ious > overlap_threshold, labels[anchor_best_iou_ids], anchor_classes)
anchor_classes = tf.tensor_scatter_nd_update(anchor_classes, matched_idxes_nd, tf.cast(best_match_labels, bbox_labels.dtype))
valid_anchors = tf.gather(anchors, matched_idxes)
valid_anchors_center, valid_anchors_hw = corners_to_center_yxhw_nd(valid_anchors)
bboxes_center, bboxes_hw = corners_to_center_yxhw_nd(bboxes)
bboxes_centers, bboxes_hws = tf.gather(bboxes_center, best_match_indxes), tf.gather(bboxes_hw, best_match_indxes)
encoded_anchors_center = (bboxes_centers - valid_anchors_center) / valid_anchors_hw
encoded_anchors_hw = tf.math.log(bboxes_hws / valid_anchors_hw)
encoded_anchors = tf.concat([encoded_anchors_center, encoded_anchors_hw], axis=-1)
dest_boxes = tf.zeros_like(anchors)
dest_boxes = tf.tensor_scatter_nd_update(dest_boxes, matched_idxes_nd, encoded_anchors)
rr = tf.concat([dest_boxes, tf.expand_dims(anchor_classes, -1)], axis=-1)
return rr
def decode_bboxes(preds, anchors):
if anchors.shape[-1] == 6: # Currently, it's yolor anchors
# anchors: [grid_y, grid_x, base_anchor_y, base_anchor_x, stride_y, stride_x]
bboxes_center = preds[:, :2] * 2 * anchors[:, 4:] + anchors[:, :2]
bboxes_hw = (preds[:, 2:4] * 2) ** 2 * anchors[:, 2:4]
else:
anchors_hw = anchors[:, 2:] - anchors[:, :2]
anchors_center = (anchors[:, :2] + anchors[:, 2:]) * 0.5
bboxes_center = preds[:, :2] * anchors_hw + anchors_center
bboxes_hw = tf.math.exp(preds[:, 2:4]) * anchors_hw
preds_top_left = bboxes_center - 0.5 * bboxes_hw
pred_bottom_right = preds_top_left + bboxes_hw
return tf.concat([preds_top_left, pred_bottom_right, preds[:, 4:]], axis=-1)
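# Sketch (toy values): for the 4-column anchor format, decode_bboxes() inverts
# the center/log-size encoding applied in assign_anchor_classes_by_iou_with_bboxes().
def _encode_decode_roundtrip_example():
    anchor = tf.constant([[0.2, 0.2, 0.6, 0.6]])
    bbox = tf.constant([[0.25, 0.25, 0.55, 0.65]])
    a_center, a_hw = corners_to_center_yxhw_nd(anchor)
    b_center, b_hw = corners_to_center_yxhw_nd(bbox)
    encoded = tf.concat([(b_center - a_center) / a_hw, tf.math.log(b_hw / a_hw)], axis=-1)
    return decode_bboxes(encoded, anchor)  # ~[[0.25, 0.25, 0.55, 0.65]]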
class AnchorFreeAssignMatching:
"""
This has to be after getting model output, as picking matched anchors needs the iou value between prediction and true bboxes.
# Basic test:
>>> from keras_cv_attention_models.coco import anchors_func
>>> aa = anchors_func.AnchorFreeAssignMatching([640, 640])
>>> # Fake data
>>> num_bboxes, num_classes, num_anchors = 32, 10, 8400
>>> bboxes_true = tf.random.uniform([num_bboxes, 4], 0, 0.5)
>>> bboxes_true = tf.concat([bboxes_true[:, :2], bboxes_true[:, 2:] + bboxes_true[:, :2]], axis=-1) # bottom, right > top, left
>>> labels_true = tf.one_hot(tf.random.uniform([num_bboxes], 0, num_classes, dtype=tf.int32), num_classes)
>>> valid_bboxes_pick = tf.cast(tf.random.uniform([num_bboxes, 1]) > 0.5, tf.float32)
>>> bbox_labels_true = tf.concat([bboxes_true, labels_true, valid_bboxes_pick], axis=-1)
>>> bbox_labels_pred = tf.random.uniform([num_anchors, 4 + num_classes + 1])
>>> # Run test
    >>> bbox_labels_true_assigned = aa(bbox_labels_true, bbox_labels_pred)
    >>> bboxes_true, bboxes_true_encoded, labels_true, object_true_idx_nd = tf.split(bbox_labels_true_assigned, [4, 4, -1, 1], axis=-1)
>>> object_true_idx_nd = tf.cast(object_true_idx_nd, tf.int32)
>>> object_true_idx = object_true_idx_nd[:, 0]
>>> object_true = tf.tensor_scatter_nd_update(tf.zeros_like(bbox_labels_pred[:, -1]), object_true_idx_nd, tf.ones_like(bboxes_true[:, -1]))
>>> print(bboxes_true.shape, bboxes_true_encoded.shape, labels_true.shape, tf.reduce_sum(tf.cast(object_true, tf.float32)).numpy())
>>> # (42, 4), (42, 4), (42, 10), 42.0
>>> print(f"{object_true.shape = }, {bbox_labels_pred[object_true > 0].shape = }")
>>> # object_true.shape = TensorShape([8400]), bbox_labels_pred[object_true > 0].shape = TensorShape([42, 15])
# Actual assigning test:
>>> from keras_cv_attention_models import yolox, test_images
>>> from keras_cv_attention_models.coco import anchors_func, data
>>> mm = yolox.YOLOXS()
>>> img = test_images.dog_cat()
>>> pred = mm(mm.preprocess_input(img))
>>> aa = anchors_func.AnchorFreeAssignMatching([640, 640])
>>> bbs, lls, ccs = mm.decode_predictions(pred)[0]
>>> bbox_labels_true = tf.concat([bbs, tf.one_hot(lls, 80), tf.ones([bbs.shape[0], 1])], axis=-1)
    >>> bbox_labels_true_assigned = aa(bbox_labels_true, pred[0])
    >>> bboxes_true, bboxes_true_encoded, labels_true, object_true_idx_nd = tf.split(bbox_labels_true_assigned, [4, 4, -1, 1], axis=-1)
>>> object_true_idx_nd = tf.cast(object_true_idx_nd, tf.int32)
>>> object_true_idx = object_true_idx_nd[:, 0]
    >>> object_true = tf.tensor_scatter_nd_update(tf.zeros_like(pred[0, :, -1]), object_true_idx_nd, tf.ones_like(bboxes_true[:, -1]))
can be downloaded
filename (str): filename for the sound
path (str): path where to save the soundfile, if None file will
be saved at current working directory
Returns
-------
None
"""
response = requests.get(url, headers=self.headers)
abs_path = os.path.join(path, filename)
logging.info("Downloading from %s to %s" % (url, abs_path))
with open(abs_path, "wb") as f:
f.write(response.content)
def download_similar_sounds(self, sound_id, path=None, duration_limit=None,
min_samplerate=None, max_similars=None,
preview_type='preview-lq-mp3'):
""" Method for downloading similar sounds to a given sound id.
Per default the low quality mp3 version will be downloaded.
Each sound in the directory will be named in the following manner:
        <target_id>-<distance_to_target>-<similar_id>.<file ending>
Parameters
----------
sound_id (int): id of the sound to which retrieve similar sounds
path (optional(str)): path where to download the soundfiles,
if no path is specified everything will be saved to cwd
duration_limit (optional(int)): include only sounds with a
duration <= duration_limit (in seconds), default=20
min_samplerate (optional(int)): include only sounds with a
samplerate >= min_samplerate, default=44100
max_similars (optional(int)): maximum of similar sounds to be
returned, if None all similar sounds which do not violate one
of the above constraints will be included, maximum returned
by freesound api are 14 similar sounds
preview_type (optional(str)): which preview type should be
downloaded, options: 'preview-lq-ogg', 'preview-hq-mp3',
'preview-hq-ogg', 'preview-lq-mp3', default='preview-lq-mp3'
Returns
-------
None
"""
# first download sound itself
sound = self.sound_instance(sound_id)
filename = "0-" + str(sound_id) + "." + preview_type.split("-")[-1]
url = sound['previews'][preview_type]
self.download(url, filename=filename, path=path)
# get a dict containing all similar sounds to the sound
similar_sounds = self.similar_sound_instances_to_dict(
sound_id, duration_limit=duration_limit,
min_samplerate=min_samplerate, max_similars=max_similars)
        # download all the sounds
for similar_id, similar_sound in similar_sounds.items():
url = similar_sound['previews'][preview_type]
# filename: sound_id - distance - similar_id . file_ending
distance = "%.2f" % similar_sound['distance_to_target']
filename = "-".join([str(sound_id), distance, str(similar_id)])
filename += "." + preview_type.split("-")[-1] # file ending
self.download(url, filename=filename, path=path)
def querysets_json(self, start_ids, filename,
fields=["id", "url", "name", "duration", "samplerate",
"previews", "similar_sounds"], flat=False,
duration_limit=None, min_samplerate=None,
max_similars=None):
""" Generates a json file containing information on all sounds given
in start_ids and their similar sounds.
This method was used for building the audiofiles.json file.
For every sound information will be stored in the following manner:
<Freesound_id>: {
"name": <name on Freesound>,
"url": <url to Freesound>,
"previews": {
"preview-lq-ogg": <url to low-quality .ogg preview>,
"preview-lq-mp3": <url to low-quality .mp3 preview>,
"preview-hq-ogg": <url to high-quality .ogg preview>,
"preview-hq-mp3": <url to high-quality .mp3 preview>
},
"distance_to_target": <distance to the start_id (target)>,
"duration": <duration in seconds>,
"samplerate": <samplerate>,
"similar_sounds": <url to similar sounds>,
"id": <the id again>,
"target": <id of the start_id (target)>
}
Parameters
----------
start_ids (list[int]): the ids for which to include similar sounds
filename (str): filename where to save the generated json file
            fields (optional(list[str])): fields to narrow the request;
                for alternatives to the default fields see the Freesound API docs
                default fields:
id (int): the sound id
url (str): url to the sound
name (str): name given by the user who uploaded the sound
duration (float): duration of the sound in seconds
samplerate (float): samplerate of the sound
previews (dict[str:str]): urls to four different types
of previews (high/low quality and .mp3 or .ogg files):
'preview-lq-ogg', 'preview-lq-mp3',
'preview-hq-ogg', 'preview-hq-mp3'
similar_sounds (str): url to page with results of similar
sounds search
duration_limit (optional(int)): include only sounds with a
duration <= duration_limit (in seconds), default=20
min_samplerate (optional(int)): include only sounds with a
samplerate >= min_samplerate, default=44100
max_similars (optional(int)): maximum of similar sounds to be
returned, if None all similar sounds which do not violate one
of the above constraints will be included, maximum returned
by freesound api are 14 similar sounds
Returns
-------
None
"""
logging.info("Building json database at '%s'" % filename)
j = dict()
# add start sound instances to dict
logging.info("Adding %d start_ids" % len(start_ids))
instances = self.sound_instances_to_dict(start_ids, fields=fields,
flat=flat,
duration_limit=duration_limit)
j.update(instances)
# add similar sound instances to dict
logging.info("Adding similar sounds to start_ids")
for start_id in start_ids:
similars = self.similar_sound_instances_to_dict(
start_id, fields=fields, flat=flat,
duration_limit=duration_limit, max_similars=max_similars)
j.update(similars)
logging.info("FINISHED building database with %d sounds" % len(j))
logging.info("Writing json formatted to file %s" % filename)
# write json like dict to file
with open(filename, "w") as f:
json.dump(j, f, indent=4)
def distances(self, query_id, sounds_to_be_compared):
""" Returns the distance between one sound and any number of sounds.
Combined search is needed for this, therefore the request might take
        some time. If you compare a lot of sounds this can take a very long
        time.
Note: The usage of the Freesound API is limited to max 2000 requests
per day. If you need to compare a lot of sounds you might want to ask
the administrators to give you more permissive limits.
Parameters
----------
query_id (int): the id of the sound
            sounds_to_be_compared (list[int]): all sounds for which the
                distance to the given query should be returned
Returns
-------
distances (list[(str, str, int)]): a list of tuples containing
id, distance of this id to the query and page at which the
result was found during combined search
Examples
--------
>>> a = freesound_api()
"""
        # prepare string containing sounds for the request url
filter_sounds = '+OR+'.join(['id:%d' % sound
for sound in sounds_to_be_compared])
url = 'http://www.freesound.org/apiv2/search/combined/?'
url += 'target=%s&filter=(%s)' % (query_id, filter_sounds)
        # during combined search it is not guaranteed that the results will be
        # on the first page, therefore you need to iterate over the pages
        # until every needed distance is found
# this is why we need the following counters
pages = 1
found_distances = 0
distances = []
# start request
logging.info('Get distance between %d and %s'
% (query_id, sounds_to_be_compared))
while True:
try:
request = requests.get(url, headers=self.headers)
result = request.json()
if not result['results']: # go on searching on next page
logging.info('No results on page %d...' % pages)
pages += 1
url = result['more']
else:
# there may be more than one result at this page
for i, s in enumerate(result['results']):
sound = result['results'][i]['id']
distance = result['results'][i]['distance_to_target']
logging.info('Found distance %s at page %d to sound %s'
% (str(distance), pages, sound))
distances.append((sound, distance, pages))
found_distances += 1
if (found_distances == len(sounds_to_be_compared)):
# all distances found
break
else:
url = result['more']
pages += 1
            except KeyError:
                logging.info(result)  # some problem with the request
        return distances
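    # Example of the combined-search URL built in distances() (hypothetical ids):
    # http://www.freesound.org/apiv2/search/combined/?target=1234&filter=(id:11+OR+id:22)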
def all_distances(self, all_sounds, queries,
output_file='freesound_distances.txt'):
""" Method for getting all distances between sounds and another
set of sounds.
Results will be printed to a given file.
        This method was used for retrieving all Freesound-internal distances
between the 10 queries and all 150 sounds within D1.
It is assumed that queries are << than sounds_to_be_compared.
The file will contain the following columns:
- query id (from one of the sounds)
- sound id (from one of the sounds to be compared)
- distance (between the two according to Freesound)
- page (in which page during combined search the distance was found)
Parameters
----------
            all_sounds (list[int]): the sounds which should be compared with the
                queries
            queries (list[int]): the queries to which all sounds should be
                compared
            output_file (optional(str)): file where results will be printed
Returns
-------
None
"""
with open(output_file, 'w') as f:
for sound in all_sounds:
distances = self.distances(sound, queries)
for query, distance, pages in distances:
f.write('%s, %d, %s, %d\n' %
(query, sound, distance, pages))
def freesound_original_top_150_distances(self, queries, output_file):
""" Method for retrieving the distances for the first 150 similar
        sounds to each of the given queries.
This method was used to get the original Freesound distances for the
        ten queries from D1. Note that the first 14 distances will belong
to sounds from D1, but the following may be any sounds from Freesound.
The file will contain one column for every query and 149 rows
in which the distances for every similar sound and query is written.
Parameters
----------
queries (list[int]): a list of the queries for which the distances
of the top 150 similar should be retrieved
output_file (str): file where the results should be print to
Returns
------
None
"""
with open(output_file, 'w') as f:
all_distances = []
|