66.765953, 66.794791], [0.2261720687866211, 94.35, 66.739562, 133.534386],
[0.21316884002685546, 95.22, 69.30083, 202.835249], [0.23311994018554688, 94.35, 67.680442, 270.51573],
[0.21979405059814452, 94.47, 67.75681, 338.272574], [0.17282760467529296, 95.75, 68.048425, 406.321034],
[0.16720095138549804, 96.11, 67.926306, 474.247376], [0.18494547576904297, 95.91, 68.730178, 542.977589],
[0.18282345581054688, 95.72, 70.338317, 613.315941], [0.18329752807617186, 95.51, 69.023614, 682.33959]], (
15, 15, 'Adam', 0.001, 32): [[0.17425620040893555, 94.78, 66.413499, 66.442689],
[0.14271494979858398, 95.95, 66.64875, 133.091473],
[0.13140846939086914, 96.41, 66.429179, 199.520687],
[0.1262480613708496, 96.63, 66.676557, 266.197281],
[0.13336401138305665, 96.26, 66.62411, 332.82143],
[0.16193838653564452, 95.72, 66.532993, 399.354458],
[0.1415913833618164, 96.83, 67.579366, 466.93386],
[0.1342766746520996, 96.91, 67.182477, 534.116373],
[0.15374485702514648, 96.53, 67.082368, 601.198775],
[0.1270507278442383, 96.81, 67.546949, 668.745758]],
(15, 15, 'Adam', 0.0005, 32): [
[0.1489730255126953, 95.59, 67.011095, 67.04], [0.11819353179931641, 96.49, 66.870618, 133.910651],
[0.11514097900390625, 96.56, 66.527206, 200.437892], [0.11393206329345704, 96.53, 66.525562, 266.963489],
[0.11515245971679687, 96.87, 66.523953, 333.487476], [0.12096667785644531, 96.85, 66.704906, 400.192416],
[0.12064556579589844, 96.91, 66.814515, 467.006966], [0.11913826141357421, 97.13, 66.636749, 533.643748],
[0.13150388488769532, 97.02, 66.88322, 600.527002], [0.13392695617675782, 97.07, 67.959699, 668.486741]], (
5, 20, 'Adam', 0.004, 32): [[0.30820848388671873, 91.85, 36.048636, 36.077527],
[0.29422505493164064, 93.11, 36.123736, 72.201309],
[0.3207913558959961, 93.42, 36.604075, 108.805418],
[0.24748704071044922, 94.31, 36.225095, 145.03055],
[0.29027782440185546, 92.93, 36.201157, 181.231742],
[0.401851220703125, 91.64, 36.179906, 217.411683],
[0.29850867462158204, 92.98, 36.948241, 254.35996],
[0.2874802261352539, 93.46, 37.396117, 291.756113],
[0.29607855529785154, 93.62, 36.89327, 328.649419],
[0.2812522018432617, 94.29, 36.368594, 365.01805]], (5, 20, 'Adam', 0.002, 32): [
[0.23804049530029298, 93.53, 36.826994, 36.855262], [0.1830996078491211, 95.03, 36.102245, 72.957539],
[0.15536177215576172, 95.76, 36.726958, 109.684532], [0.1619378952026367, 96.15, 36.800398, 146.484965],
[0.16667451477050782, 96.1, 36.247062, 182.732062], [0.14687996292114258, 96.3, 36.501448, 219.233544],
[0.18636453247070311, 95.47, 36.712885, 255.946462], [0.18888533935546875, 95.16, 36.674278, 292.620774],
[0.13492145462036134, 96.72, 36.088853, 328.709661], [0.18403785858154298, 96.5, 36.391981, 365.101676]], (
5, 20, 'Adam', 0.001, 32): [[0.17605555877685547, 94.52, 36.443898, 36.472313],
[0.15965280609130858, 95.33, 37.81814, 74.290501],
[0.130964493560791, 96.25, 36.298009, 110.588546],
[0.13877102127075194, 96.36, 36.583926, 147.172508],
[0.12486910934448242, 96.56, 36.54849, 183.721033],
[0.13766362380981445, 96.49, 36.426926, 220.147995],
[0.1342475082397461, 96.62, 36.747479, 256.895511],
[0.14888114013671874, 96.55, 36.58923, 293.484784],
[0.12716330184936522, 96.78, 36.294013, 329.778837],
[0.16149766235351562, 96.66, 36.933958, 366.712831]],
(5, 20, 'Adam', 0.0005, 32): [
[0.19801729583740235, 94.13, 36.156011, 36.18459], [0.14019447860717774, 95.78, 36.796445, 72.981068],
[0.11827448806762696, 96.59, 37.219951, 110.201052], [0.1167128921508789, 96.81, 36.562355, 146.763441],
[0.10441825714111329, 96.77, 36.729476, 183.49295], [0.10669879379272461, 96.89, 36.596476, 220.08946],
[0.11220286331176758, 96.67, 36.683877, 256.77337], [0.10638168525695801, 97.17, 36.068469, 292.841876],
[0.11640414962768554, 96.8, 36.52551, 329.36742], [0.10429597854614257, 97.29, 36.777458, 366.144914]], (
10, 20, 'Adam', 0.004, 32): [[0.4705408782958984, 88.3, 53.269304, 53.297898],
[0.3901649108886719, 93.46, 52.567302, 105.865253],
[0.27738614959716795, 93.3, 52.749209, 158.614499],
[0.38698813781738284, 91.44, 52.785033, 211.399568],
[0.524415087890625, 89.19, 53.907054, 265.30666],
[0.6881028137207031, 84.56, 53.648489, 318.955184],
[0.36339465026855466, 91.36, 53.223961, 372.179181],
[0.2788296432495117, 93.48, 53.018929, 425.198147],
[0.38091158752441406, 91.65, 53.525769, 478.723964],
[0.41544855651855467, 91.3, 53.813797, 532.537798]],
(10, 20, 'Adam', 0.002, 32): [
[0.24587471618652343, 94.1, 52.5289, 52.557641], [0.15374049453735353, 96.2, 53.363553, 105.921231],
[0.15984734649658203, 95.67, 54.271906, 160.193173], [0.18022706756591797, 96.07, 53.145765, 213.338972],
[0.17899822692871092, 95.9, 53.591056, 266.930063], [0.16211663131713866, 95.77, 53.629085, 320.559184],
[0.19333703308105468, 95.1, 53.731113, 374.290332], [0.17760402374267578, 96.19, 54.336991, 428.627358],
[0.16920812301635743, 95.9, 54.376756, 483.00415], [0.14778341217041016, 96.56, 54.090715, 537.094902]], (
10, 20, 'Adam', 0.001, 32): [[0.16233031311035157, 95.37, 52.804315, 52.833256],
[0.13075369644165039, 96.63, 52.890847, 105.724139],
[0.13949074020385743, 96.21, 53.40748, 159.131655],
[0.1414550437927246, 96.71, 52.971898, 212.103589],
[0.11727464981079101, 97.13, 52.975783, 265.07941],
[0.23183643646240235, 94.82, 53.020964, 318.100411],
[0.1370917449951172, 96.72, 53.392741, 371.493189],
[0.16373165969848633, 96.29, 53.638821, 425.132048],
[0.15415760269165038, 96.59, 54.174043, 479.306129],
[0.12442790222167968, 97.05, 53.940963, 533.24713]],
(10, 20, 'Adam', 0.0005, 32): [
[0.16046488342285156, 95.08, 52.445393, 52.474012], [0.10928079528808594, 96.85, 52.868492, 105.342539],
[0.11191147003173828, 96.81, 52.478773, 157.821349], [0.10601462936401367, 97.26, 52.523609, 210.344994],
[0.12210070266723633, 96.93, 52.648267, 262.993308], [0.10662662582397461, 97.22, 52.631511, 315.624855],
[0.10914044265747071, 97.25, 52.770593, 368.395483], [0.12668590469360352, 97.32, 52.588128, 420.983646],
[0.1187382713317871, 97.23, 52.923681, 473.907363], [0.13876376419067382, 97.11, 53.357261, 527.264661]], (
15, 20, 'Adam', 0.004, 32): [[0.4836129180908203, 90.62, 69.516394, 69.545753],
[0.343066748046875, 92.32, 70.180529, 139.726317],
[0.3418696533203125, 92.16, 70.756712, 210.483065],
[0.6358513671875, 88.54, 70.943121, 281.426223],
[0.41990096435546875, 91.09, 71.8039, 353.230178],
[0.45815287170410157, 89.01, 71.481832, 424.712046],
[0.43645419921875, 91.01, 71.47152, 496.183601],
[0.33761297912597654, 92.71, 71.453375, 567.637011],
[0.36084500732421876, 92.2, 81.877767, 649.514813],
[0.6465879699707031, 88.01, 72.329394, 721.844246]],
(15, 20, 'Adam', 0.002, 32): [
[0.20082282867431642, 94.68, 68.706672, 68.737282], [0.20961283264160158, 94.39, 69.278253, 138.01557],
[0.2490068588256836, 94.29, 69.585117, 207.600724], [0.3632061004638672, 91.97, 69.83263, 277.433389],
[0.21016073455810547, 95.52, 69.981472, 347.414897], [0.17851744079589843, 95.65, 70.614291, 418.029225],
[0.2134849609375, 95.66, 71.244967, 489.274226], [0.2124144287109375, 95.45, 70.9172, 560.191464],
[0.19736182708740235, 95.47, 71.353261, 631.544761], [0.22190050201416014, 95.4, 71.654503, 703.199299]], (
15, 20, 'Adam', 0.001, 32): [[0.13824123764038085, 96.06, 69.186784, 69.216062],
[0.14146766510009764, 96.12, 68.716826, 137.932924],
[0.11720893478393554, 96.91, 68.580989, 206.513949],
[0.11383481140136718, 96.86, 68.712765, 275.22675],
[0.12942663192749024, 96.65, 68.698601, 343.925385],
[0.1437689552307129, 96.52, 69.596611, 413.52203],
[0.15887965393066406, 96.74, 70.142237, 483.664301],
[0.1293421096801758, 96.92, 70.437969, 554.102309],
[0.1237819351196289, 96.88, 70.411818, 624.514162],
[0.171864315032959, 96.5, 71.905299, 696.419497]], (15, 20, 'Adam', 0.0005, 32): [
[0.13187027130126952, 95.96, 71.072866, 71.101939], [0.13658984985351563, 96.24, 68.572111, 139.674087],
[0.11151452255249024, 96.8, 68.423517, 208.09764], [0.11204429931640625, 97.16, 68.552867, 276.650557],
[0.11940365753173827, 97.22, 68.649824, 345.300417], [0.11482046203613282, 97.16, 68.832956, 414.133408],
[0.12603187713623046, 97.34, 68.736921, 482.870368], [0.11502775268554688, 97.54, 69.114952, 551.985354],
[0.14357055282592773, 97.19, 70.229854, 622.215242], [0.12789841995239257, 97.23, 70.221971, 692.437249]], (
5, 25, 'Adam', 0.004, 32): [[0.31567224731445315, 92.47, 37.266911, 37.295281],
[0.3297391845703125, 93.1, 36.955753, 74.251068],
[0.25870048828125, 94.19, 36.537907, 110.78901],
[0.2271037124633789, 94.07, 37.191593, 147.980638],
[0.24558405151367188, 94.03, 37.960093, 185.940767],
[0.23579429473876953, 94.52, 37.14511, 223.085912],
[0.21484208068847657, 94.99, 36.911357, 259.997305],
[0.29334081573486326, 93.2, 37.224718, 297.22206],
[0.30202613372802734, 93.11, 36.98251, 334.204613],
[0.23647701416015626, 94.27, 37.41892, 371.623568]], (5, 25, 'Adam', 0.002, 32): [
[0.23931537017822266, 93.82, 37.051916, 37.080413], [0.17171802978515624, 95.67, 36.643452, 73.7239],
[0.18616173400878908, 95.17, 37.622811, 111.346748], [0.1925108459472656, 95.2, 37.048665, 148.395448],
[0.16016440048217773, 96.18, 37.598476, 185.993958], [0.1991802734375, 95.75, 37.287338, 223.281331],
[0.1762312026977539, 96.05, 36.87518, 260.156546], [0.20306101684570313, 95.62, 37.557547, 297.714129],
[0.18633356628417969, 96.33, 37.526521, 335.240686], [0.2032092819213867, 96.03, 38.022493, 373.263247]], (
5, 25, 'Adam', 0.001, 32): [[0.1653879669189453, 95.05, 37.507432, 37.53584],
[0.1582515106201172, 95.86, 36.971717, 74.507594],
[0.12369730224609375, 96.52, 36.998193, 111.505822],
[0.13722529067993164, 96.31, 37.161118, 148.666977],
[0.1219148811340332, 96.73, 36.7579, 185.424912],
[0.12983333587646484, 96.84, 37.160845, 222.585792],
[0.12188743133544921, 97.33, 37.272932, 259.858775],
[0.12790887298583983, 96.92, 37.907028, 297.765838],
[0.113337744140625, 97.32, 38.314315, 336.080191],
[0.1272571647644043, 97.17, 37.066735, 373.14696]], (5, 25, 'Adam', 0.0005, 32): [
[0.19237369842529298, 94.32, 36.708072, 36.736491], [0.14381300735473632, 95.98, 36.847984, 73.584509],
[0.11615434875488281, 96.7, 37.070226, 110.65477], [0.11727920379638672, 96.67, 37.017791, 147.672597],
[0.10947379989624023, 96.68, 36.999618, 184.672251], [0.1135463607788086, 97.12, 37.401759, 222.074048],
[0.11861457443237304, 96.87, 37.010849, 259.084934], [0.11105342025756836, 97.02, 36.772924, 295.857895],
[0.10530190200805664, 97.41, 37.292395, 333.150327], [0.12929539489746095, 96.77, 36.864111, 370.014475]], (
10, 25, 'Adam', 0.004, 32): [[0.5102248504638672, 90.05, 53.855514, 53.884993],
[0.37952768859863284, 91.98, 54.486266, 108.371296],
[0.3560810089111328, 91.97, 54.807829, 163.179163],
[0.36939797973632815, 91.03, 54.952684, 218.131885],
[0.3212246734619141, 92.11, 54.873566, 273.005504],
[0.44249183654785157, 91.1, 54.908803, 327.914344],
[0.3163163757324219, 92.74, 54.985621, 382.900013],
[0.3820402709960937, 92.64, 55.136341, 438.036391],
[0.31153206939697264, 93.11, 55.800903, 493.837332],
[0.3631453430175781, 92.19, 55.07919, 548.916575]], (10, 25, 'Adam', 0.002, 32): [
[0.18830061950683594, 94.74, 53.567736, 53.596513], [0.20787491607666014, 94.62, 53.829957, 107.426504],
[0.1995637451171875, 95.72, 54.168625, 161.595163], [0.20067914428710937, 95.31, 54.476084, 216.071283],
[0.25281692504882813, 95.75, 54.867139, 270.938457], [0.18089910736083983, 95.71, 54.857564, 325.796058],
[0.2539282684326172, 94.62, 55.055426, 380.85152], [0.22642360534667968, 95.5, 55.295322, 436.146879],
[0.2014279815673828, 95.79, 55.512741, 491.659655], [0.23798985748291016, 95.6, 55.822877, 547.482568]], (
10, 25, 'Adam', 0.001, 32): [[0.15383016510009764, 95.85, 53.652781, 53.681671],
[0.15182426223754883, 95.93, 53.53411, 107.215818],
[0.13213476638793945, 96.4, 53.598537, 160.81439],
[0.12907257919311524, 96.86, 53.871681, 214.686106],
[0.1540932243347168, 96.87, 53.900896, 268.587038],
[0.12946163787841797, 96.99, 54.627056, 323.214147],
[0.1233955924987793, 97.12, 56.509072, 379.723257],
[0.12417288131713868, 97.16, 54.59842, 434.321714],
[0.14135757369995117, 97.25, 55.164297, 489.486047],
[0.17177118453979492, 96.81, 55.241041, 544.727124]],
(10, 25, 'Adam', 0.0005, 32): [
[0.1438319320678711, 96.01, 53.733983, 53.780667], [0.11615073928833008, 96.69, 53.286582, 107.067286],
[0.12114904251098634, 96.73, 53.276295, 160.343616], [0.10914976577758789, 96.94, 53.3202, 213.66385],
[0.1499665626525879, 96.52, 53.350896, 267.014781], [0.11402848091125488, 97.39, 53.600495, 320.615311],
[0.13012537536621094, 97.09, 53.891038, 374.506384], [0.12138752365112304, 97.35, 53.721534, 428.227953],
[0.13014474411010743, 97.57, 54.139165, 482.367152], [0.13419246368408203, 97.44, 54.657341, 537.024527]], (
15, 25, 'Adam', 0.004, 32): [[0.5543128845214844, 87.52, 72.503189, 72.53273],
[0.49530757751464843, 88.5, 73.647922, 146.180687],
[0.511628369140625, 89.28, 73.803597, 219.984321],
[0.39625316162109375, 90.67, 74.452957, 294.437323],
[0.3819243133544922, 90.88, 74.277672, 368.715034],
[0.3754981201171875, 92.68, 76.234891, 444.949963],
[0.42379490661621094, 90.24, 74.545606, 519.495607],
[0.5753708465576172, 90.39, 74.32745, 593.823092],
[0.39811470947265626, 90.71, 73.96023, 667.783357],
[0.5002307647705078, 90.33, 74.313644, 742.097039]],
(15, 25, 'Adam', 0.002, 32): [
[0.21526619873046876, 94.21, 71.828205, 71.857576], [0.18489559631347657, 95.56, 72.236191, 144.093802],
[0.19916227569580078, 95.54, 72.941544, 217.035381], [0.17132504577636717, 95.88, 73.989308, 291.024724],
[0.20679649963378907, 95.11, 74.210276, 365.235035], [0.18597142181396484, 95.85, 74.378936, 439.614008],
[0.1964055694580078, 95.54, 74.80926, 514.423303], [0.20234808044433594, 95.44, 75.810213, 590.233552],
[0.2032507942199707, 96.12, 75.752223, 665.985812], [0.2578329147338867, 95.32, 75.603376, 741.589225]], (
15, 25, 'Adam', 0.001, 32): [[0.14373629989624023, 95.76, 71.526642, 71.555866],
[0.1483383117675781, 96.05, 72.434907, 143.990809],
[0.12494604873657227, 96.79, 71.93153, 215.922375],
[0.1405346015930176, 96.56, 72.15737, 288.079781],
[0.13852364273071288, 96.49, 73.18548, 361.265296],
[0.13980856399536132, 96.85, 73.705133, 434.970465],
[0.1706570655822754, 96.66, 73.994712, 508.965212],
[0.16113263626098634, 96.96, 75.186923, 584.152172],
[0.18133347930908203, 96.93, 76.12918, 660.281386],
[0.1506628173828125, 96.91, 76.871451, 737.152872]],
(15, 25, 'Adam', 0.0005, 32): [
[0.12534110717773436, 96.19, 71.600989, 71.630172], [0.1280417495727539, 96.72, 71.339757, 142.969964],
[0.10546344680786132, 97.14, 71.328308, 214.298306], [0.10780036697387696, 97.18, 71.839473, 286.137814],
[0.12262848281860352, 97.03, 71.816119, 357.953966], [0.17044371109008788, 96.66, 72.464751, 430.418753],
[0.14115094604492187, 97.12, 73.447922, 503.866709], [0.1465856460571289, 97.34, 74.93387, 578.800614],
[0.17595641784667967, 96.91, 77.528656, 656.329307], [0.15566091232299806, 97.16, 74.615386, 730.944727]], (
5, 30, 'Adam', 0.004, 32): [[0.39286899719238283, 91.46, 37.609751, 37.638122],
[0.2398052032470703, 94.05, 37.546452, 75.184609],
[0.3262681335449219, 92.78, 37.400123, 112.584767],
[0.33355901794433596, 92.94, 37.774055, 150.358859],
[0.30850332641601563, 93.34, 37.996738, 188.355635],
[0.2681515335083008, 94.09, 37.235959, 225.59163],
[0.2342025421142578, 94.78, 37.405773, 262.997441],
[0.24029777374267577, 94.34, 37.437867, 300.435344],
[0.24579509887695314, 94.5, 39.206572, 339.641954],
[0.3870605529785156, 92.47, 39.735184, 379.377175]],
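# A minimal, hedged sketch of how results shaped like the dump above could be summarised.
# It assumes (the dump itself does not say so) that each key is
# (param_a, param_b, optimizer, learning_rate, batch_size) and that each row is
# [loss, accuracy_percent, epoch_seconds, elapsed_seconds]; the values below are
# abbreviated from the data purely for illustration.
example_results = {
    (15, 15, 'Adam', 0.001, 32): [
        [0.174, 94.78, 66.41, 66.44],
        [0.127, 96.81, 67.55, 668.75],
    ],
    (5, 20, 'Adam', 0.0005, 32): [
        [0.198, 94.13, 36.16, 36.18],
        [0.104, 97.29, 36.78, 366.14],
    ],
}

def best_configuration(results):
    """Return the (key, history) pair with the highest final-epoch accuracy."""
    return max(results.items(), key=lambda item: item[1][-1][1])

best_key, best_history = best_configuration(example_results)
print(best_key, 'final accuracy:', best_history[-1][1])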
cons170,
)
rule6620 = ReplacementRule(pattern6620, replacement6620)
pattern6621 = Pattern(
Integral(
x_ ** WC("m", S(1))
* acoth(
f_ ** (x_ * WC("d", S(1)) + WC("c", S(0))) * WC("b", S(1))
+ WC("a", S(0))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons127,
cons20,
cons170,
)
rule6621 = ReplacementRule(pattern6621, replacement6621)
pattern6622 = Pattern(
Integral(
WC("u", S(1))
* atanh(
WC("c", S(1)) / (x_ ** WC("n", S(1)) * WC("b", S(1)) + WC("a", S(0)))
)
** WC("m", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons4,
cons19,
cons1768,
)
rule6622 = ReplacementRule(pattern6622, replacement6622)
pattern6623 = Pattern(
Integral(
WC("u", S(1))
* acoth(
WC("c", S(1)) / (x_ ** WC("n", S(1)) * WC("b", S(1)) + WC("a", S(0)))
)
** WC("m", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons4,
cons19,
cons1768,
)
rule6623 = ReplacementRule(pattern6623, replacement6623)
pattern6624 = Pattern(
Integral(
S(1)
/ (
sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0)))
* atanh(
x_
* WC("c", S(1))
/ sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0)))
)
),
x_,
),
cons2,
cons3,
cons8,
cons1941,
)
rule6624 = ReplacementRule(pattern6624, replacement6624)
pattern6625 = Pattern(
Integral(
S(1)
/ (
sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0)))
* acoth(
x_
* WC("c", S(1))
/ sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0)))
)
),
x_,
),
cons2,
cons3,
cons8,
cons1941,
)
rule6625 = ReplacementRule(pattern6625, replacement6625)
pattern6626 = Pattern(
Integral(
atanh(x_ * WC("c", S(1)) / sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0))))
** WC("m", S(1))
/ sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0))),
x_,
),
cons2,
cons3,
cons8,
cons19,
cons1941,
cons68,
)
rule6626 = ReplacementRule(pattern6626, replacement6626)
pattern6627 = Pattern(
Integral(
acoth(x_ * WC("c", S(1)) / sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0))))
** WC("m", S(1))
/ sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0))),
x_,
),
cons2,
cons3,
cons8,
cons19,
cons1941,
cons68,
)
rule6627 = ReplacementRule(pattern6627, replacement6627)
pattern6628 = Pattern(
Integral(
atanh(x_ * WC("c", S(1)) / sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0))))
** WC("m", S(1))
/ sqrt(x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons1941,
cons385,
)
rule6628 = ReplacementRule(pattern6628, replacement6628)
pattern6629 = Pattern(
Integral(
acoth(x_ * WC("c", S(1)) / sqrt(x_ ** S(2) * WC("b", S(1)) + WC("a", S(0))))
** WC("m", S(1))
/ sqrt(x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons1941,
cons385,
)
rule6629 = ReplacementRule(pattern6629, replacement6629)
pattern6630 = Pattern(
Integral(
(x_ ** S(2) * WC("d", S(1)) + WC("c", S(0))) ** n_
* atanh(x_ * WC("a", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons810,
cons1588,
)
rule6630 = ReplacementRule(pattern6630, With6630)
pattern6631 = Pattern(
Integral(
(x_ ** S(2) * WC("d", S(1)) + WC("c", S(0))) ** n_
* acoth(x_ * WC("a", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons810,
cons1588,
)
rule6631 = ReplacementRule(pattern6631, With6631)
pattern6632 = Pattern(
Integral(u_ * v_ ** WC("n", S(1)), x_),
cons820,
cons87,
cons465,
cons1942,
cons1943,
CustomConstraint(With6632),
)
rule6632 = ReplacementRule(pattern6632, replacement6632)
pattern6633 = Pattern(
Integral(u_ * v_ ** WC("n", S(1)), x_),
cons820,
cons87,
cons465,
cons1942,
cons1944,
CustomConstraint(With6633),
)
rule6633 = ReplacementRule(pattern6633, replacement6633)
pattern6634 = Pattern(
Integral(
atanh(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1945,
)
rule6634 = ReplacementRule(pattern6634, replacement6634)
pattern6635 = Pattern(
Integral(
acoth(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1945,
)
rule6635 = ReplacementRule(pattern6635, replacement6635)
pattern6636 = Pattern(
Integral(
atanh(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1945,
)
rule6636 = ReplacementRule(pattern6636, replacement6636)
pattern6637 = Pattern(
Integral(
acoth(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1945,
)
rule6637 = ReplacementRule(pattern6637, replacement6637)
pattern6638 = Pattern(
Integral(
atanh(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1946,
)
rule6638 = ReplacementRule(pattern6638, replacement6638)
pattern6639 = Pattern(
Integral(
acoth(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1946,
)
rule6639 = ReplacementRule(pattern6639, replacement6639)
pattern6640 = Pattern(
Integral(
atanh(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1946,
)
rule6640 = ReplacementRule(pattern6640, replacement6640)
pattern6641 = Pattern(
Integral(
acoth(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons1946,
)
rule6641 = ReplacementRule(pattern6641, replacement6641)
pattern6642 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* atanh(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1945,
)
rule6642 = ReplacementRule(pattern6642, replacement6642)
pattern6643 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* acoth(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1945,
)
rule6643 = ReplacementRule(pattern6643, replacement6643)
pattern6644 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* atanh(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1945,
)
rule6644 = ReplacementRule(pattern6644, replacement6644)
pattern6645 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* acoth(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1945,
)
rule6645 = ReplacementRule(pattern6645, replacement6645)
pattern6646 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* atanh(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1946,
)
rule6646 = ReplacementRule(pattern6646, replacement6646)
pattern6647 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* acoth(
WC("c", S(0)) + WC("d", S(1)) * tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1946,
)
rule6647 = ReplacementRule(pattern6647, replacement6647)
pattern6648 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* atanh(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1946,
)
rule6648 = ReplacementRule(pattern6648, replacement6648)
pattern6649 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* acoth(
WC("c", S(0)) + WC("d", S(1)) / tanh(x_ * WC("b", S(1)) + WC("a", S(0)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons64,
cons1946,
)
rule6649 = ReplacementRule(pattern6649, replacement6649)
pattern6650 = Pattern(
Integral(atanh(tan(x_ * WC("b", S(1)) + WC("a", S(0)))), x_),
cons2,
cons3,
cons69,
)
rule6650 = ReplacementRule(pattern6650, replacement6650)
pattern6651 = Pattern(
Integral(acoth(tan(x_ * WC("b", S(1)) + WC("a", S(0)))), x_),
cons2,
cons3,
cons69,
)
rule6651 = ReplacementRule(pattern6651, replacement6651)
pattern6652 = Pattern(
Integral(atanh(S(1) / tan(x_ * WC("b", S(1)) + WC("a", S(0)))), x_),
cons2,
cons3,
cons69,
)
rule6652 = ReplacementRule(pattern6652, replacement6652)
pattern6653 = Pattern(
Integral(acoth(S(1) / tan(x_ * WC("b", S(1)) + WC("a", S(0)))), x_),
cons2,
cons3,
cons69,
)
rule6653 = ReplacementRule(pattern6653, replacement6653)
pattern6654 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* atanh(tan(x_ * WC("b", S(1)) + WC("a", S(0)))),
x_,
),
cons2,
cons3,
cons50,
cons127,
cons64,
)
rule6654 = ReplacementRule(pattern6654, replacement6654)
pattern6655 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* acoth(tan(x_ * WC("b", S(1)) + WC("a", S(0)))),
x_,
),
cons2,
cons3,
cons50,
cons127,
cons64,
)
rule6655 = ReplacementRule(pattern6655, replacement6655)
pattern6656 = Pattern(
Integral(
(x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1))
* atanh(S(1) / tan(x_ * WC("b", S(1)) + WC("a", S(0)))),
x_,
),
# Copyright 2015 <NAME>
#
# This file is part of Platypus, a Python module for designing and using
# evolutionary algorithms (EAs) and multiobjective evolutionary algorithms
# (MOEAs).
#
# Platypus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Platypus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Platypus. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import sys
import copy
import math
import random
import operator
import itertools
import functools
from abc import ABCMeta, abstractmethod
from .core import Algorithm, ParetoDominance, AttributeDominance, \
nondominated_sort, nondominated_prune, \
nondominated_truncate, nondominated_split, crowding_distance, \
EPSILON, POSITIVE_INFINITY, Archive, EpsilonDominance, FitnessArchive, \
Solution, HypervolumeFitnessEvaluator, nondominated_cmp, fitness_key, \
crowding_distance_key, AdaptiveGridArchive, Selector, EpsilonBoxArchive, \
PlatypusError
from .operators import TournamentSelector, RandomGenerator, \
DifferentialEvolution, clip, UniformMutation, NonUniformMutation, \
GAOperator, SBX, PM, UM, PCX, UNDX, SPX, Multimethod
from .tools import DistanceMatrix, choose, point_line_dist, lsolve, \
tred2, tql2, check_eigensystem
from .weights import random_weights, chebyshev, normal_boundary_weights
from .config import default_variator, default_mutator
try:
set
except NameError:
from sets import Set as set
class AbstractGeneticAlgorithm(Algorithm):
__metaclass__ = ABCMeta
def __init__(self, problem,
population_size=100,
generator=RandomGenerator(),
**kwargs):
super(AbstractGeneticAlgorithm, self).__init__(problem, **kwargs)
self.population_size = population_size
self.generator = generator
self.result = []
def step(self):
if self.nfe == 0:
self.initialize()
self.result = self.population
else:
self.iterate()
self.result = self.population
def initialize(self):
self.population = [self.generator.generate(self.problem) for _ in range(self.population_size)]
self.evaluate_all(self.population)
@abstractmethod
def iterate(self):
raise NotImplementedError("method not implemented")
class SingleObjectiveAlgorithm(AbstractGeneticAlgorithm):
__metaclass__ = ABCMeta
def __init__(self, problem,
population_size=100,
generator=RandomGenerator(),
**kwargs):
super(SingleObjectiveAlgorithm, self).__init__(problem, population_size, generator, **kwargs)
if problem.nobjs != 1:
raise PlatypusError("can not instantiate single objective algorithm "
"on problem with %d objectives" % problem.nobjs)
class GeneticAlgorithm(SingleObjectiveAlgorithm):
def __init__(self, problem,
population_size=100,
offspring_size=100,
generator=RandomGenerator(),
selector=TournamentSelector(2),
comparator=ParetoDominance(),
variator=None,
**kwargs):
super(GeneticAlgorithm, self).__init__(problem, population_size, generator, **kwargs)
self.offspring_size = offspring_size
self.selector = selector
self.comparator = comparator
self.variator = variator
def initialize(self):
super(GeneticAlgorithm, self).initialize()
if self.variator is None:
self.variator = default_variator(self.problem)
def iterate(self):
offspring = []
while len(offspring) < self.offspring_size:
parents = self.selector.select(self.variator.arity, self.population)
offspring.extend(self.variator.evolve(parents))
self.evaluate_all(offspring)
self.population = offspring[:self.population_size]
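# A hedged usage sketch (not part of the original module): minimise x**2 with the
# GeneticAlgorithm above. Problem and Real are assumed to come from Platypus' own
# core and types modules; the imports are local so nothing runs at import time.
def _example_single_objective_run():
    from platypus.core import Problem
    from platypus.types import Real
    problem = Problem(1, 1)                      # one decision variable, one objective
    problem.types[:] = Real(-10, 10)
    problem.function = lambda vars: [vars[0] ** 2]
    algorithm = GeneticAlgorithm(problem, population_size=50)
    algorithm.run(1000)                          # budget in function evaluations
    return algorithm.result[0].objectives[0]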
class EvolutionaryStrategy(SingleObjectiveAlgorithm):
def __init__(self, problem,
population_size=100,
offspring_size=100,
generator=RandomGenerator(),
comparator=ParetoDominance(),
variator=None,
**kwargs):
super(EvolutionaryStrategy, self).__init__(problem, population_size, generator, **kwargs)
self.offspring_size = offspring_size
self.comparator = comparator
self.variator = variator
def initialize(self):
super(EvolutionaryStrategy, self).initialize()
if self.variator is None:
self.variator = default_mutator(self.problem)
def iterate(self):
offspring = []
offspring.extend(self.population)
for i in range(self.offspring_size):
parents = [self.population[i % len(self.population)]]
offspring.extend(self.variator.evolve(parents))
self.evaluate_all(offspring)
offspring = sorted(offspring, key=functools.cmp_to_key(self.comparator))
self.population = offspring[:self.population_size]
class NSGAII(AbstractGeneticAlgorithm):
def __init__(self, problem,
population_size=100,
generator=RandomGenerator(),
selector=TournamentSelector(2),
variator=None,
archive=None,
**kwargs):
super(NSGAII, self).__init__(problem, population_size, generator, **kwargs)
self.selector = selector
self.variator = variator
self.archive = archive
self.historic = []
def step(self):
if self.nfe == 0:
self.initialize()
else:
self.iterate()
if self.archive is not None:
self.result = self.archive
else:
self.result = self.population
def initialize(self):
super(NSGAII, self).initialize()
if self.archive is not None:
self.archive += self.population
if self.variator is None:
self.variator = default_variator(self.problem)
def iterate(self):
offspring = []
while len(offspring) < self.population_size:
parents = self.selector.select(self.variator.arity, self.population)
offspring.extend(self.variator.evolve(parents))
self.evaluate_all(offspring)
offspring.extend(self.population)
nondominated_sort(offspring)
self.population = nondominated_truncate(offspring, self.population_size)
# Print statistics
raioMin = min([x.objectives[0] for x in self.population])
bodeMin = min([x.objectives[1] for x in self.population])
iseMin = min([x.objectives[2] for x in self.population])
print(str(round(self.nfe / self.population_size)) + " \t| " + str(raioMin) + " \t| " + str(
bodeMin) + " \t| " + str(iseMin))
# Append this generation's minima to the history
self.historic.append([raioMin, bodeMin, iseMin])
if self.archive is not None:
self.archive.extend(self.population)
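# A hedged usage sketch (not part of the original module): run the NSGAII variant above
# on a three-objective benchmark, matching the three per-generation minima it prints.
# DTLZ2 is assumed to be available from Platypus' problems module.
def _example_nsgaii_run():
    from platypus.problems import DTLZ2
    problem = DTLZ2(3)                           # three objectives
    algorithm = NSGAII(problem, population_size=100)
    algorithm.run(10000)                         # budget in function evaluations
    return algorithm.result                      # truncated non-dominated population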
class EpsMOEA(AbstractGeneticAlgorithm):
def __init__(self, problem,
epsilons,
population_size=100,
generator=RandomGenerator(),
selector=TournamentSelector(2),
variator=None,
**kwargs):
super(EpsMOEA, self).__init__(problem, population_size, generator, **kwargs)
self.selector = selector
self.variator = variator
self.dominance = ParetoDominance()
self.archive = EpsilonBoxArchive(epsilons)
def step(self):
if self.nfe == 0:
self.initialize()
else:
self.iterate()
self.result = self.archive
def initialize(self):
super(EpsMOEA, self).initialize()
self.archive += self.population
if self.variator is None:
self.variator = default_variator(self.problem)
def iterate(self):
if len(self.archive) <= 1:
parents = self.selector.select(self.variator.arity, self.population)
else:
parents = self.selector.select(self.variator.arity - 1, self.population) + [random.choice(self.archive)]
random.shuffle(parents)
children = self.variator.evolve(parents)
self.evaluate_all(children)
for child in children:
self._add_to_population(child)
self.archive.add(child)
def _add_to_population(self, solution):
dominates = []
dominated = False
for i in range(self.population_size):
flag = self.dominance.compare(solution, self.population[i])
if flag < 0:
dominates.append(i)
elif flag > 0:
dominated = True
if len(dominates) > 0:
del self.population[random.choice(dominates)]
self.population.append(solution)
elif not dominated:
self.population.remove(random.choice(self.population))
self.population.append(solution)
class GDE3(AbstractGeneticAlgorithm):
def __init__(self, problem,
population_size=100,
generator=RandomGenerator(),
variator=DifferentialEvolution(),
**kwargs):
super(GDE3, self).__init__(problem, population_size, generator, **kwargs)
self.variator = variator
self.dominance = ParetoDominance()
def select(self, i, arity):
indices = []
indices.append(i)
indices.extend(random.sample(list(range(0, i)) + list(range(i + 1, len(self.population))),
arity - 1))
return operator.itemgetter(*indices)(self.population)
def survival(self, offspring):
next_population = []
for i in range(self.population_size):
flag = self.dominance.compare(offspring[i], self.population[i])
if flag <= 0:
next_population.append(offspring[i])
if flag >= 0:
next_population.append(self.population[i])
nondominated_sort(next_population)
return nondominated_prune(next_population, self.population_size)
def initialize(self):
super(GDE3, self).initialize()
if self.variator is None:
self.variator = default_variator(self.problem)
def iterate(self):
offspring = []
for i in range(self.population_size):
parents = self.select(i, self.variator.arity)
offspring.extend(self.variator.evolve(parents))
self.evaluate_all(offspring)
self.population = self.survival(offspring)
class SPEA2(AbstractGeneticAlgorithm):
def __init__(self, problem,
population_size=100,
generator=RandomGenerator(),
variator=None,
dominance=ParetoDominance(),
k=1,
**kwargs):
super(SPEA2, self).__init__(problem, population_size, generator, **kwargs)
self.variator = variator
self.dominance = dominance
self.k = k
self.selection = TournamentSelector(2, dominance=AttributeDominance(fitness_key))
def _distance(self, solution1, solution2):
return math.sqrt(
sum([math.pow(solution2.objectives[i] - solution1.objectives[i], 2.0) for i in range(self.problem.nobjs)]))
def _assign_fitness(self, solutions):
strength = [0] * len(solutions)
fitness = [0.0] * len(solutions)
# compute dominance flags
keys = list(itertools.combinations(range(len(solutions)), 2))
flags = map(self.dominance.compare, [solutions[k[0]] for k in keys], [solutions[k[1]] for k in keys])
# compute the distance matrix
distanceMatrix = DistanceMatrix(solutions)
# count the number of individuals each solution dominates
for key, flag in zip(keys, flags):
if flag < 0:
strength[key[0]] += 1
elif flag > 0:
strength[key[1]] += 1
# the raw fitness is the sum of the dominance counts (strength) of all
# dominated solutions
for key, flag in zip(keys, flags):
if flag < 0:
fitness[key[1]] += strength[key[0]]
elif flag > 0:
fitness[key[0]] += strength[key[1]]
# add density to fitness
for i in range(len(solutions)):
fitness[i] += 1.0 / (distanceMatrix.kth_distance(i, self.k) + 2.0)
# assign fitness attribute
for i in range(len(solutions)):
solutions[i].fitness = fitness[i]
def _truncate(self, solutions, size):
survivors = [s for s in solutions if s.fitness < 1.0]
if len(survivors) < size:
remaining = [s for s in solutions if s.fitness >= 1.0]
remaining = sorted(remaining, key=fitness_key)
survivors.extend(remaining[:(size - len(survivors))])
else:
distanceMatrix = DistanceMatrix(survivors)
while len(survivors) > size:
most_crowded = distanceMatrix.find_most_crowded()
distanceMatrix.remove_point(most_crowded)
del survivors[most_crowded]
return survivors
def initialize(self):
super(SPEA2, self).initialize()
self._assign_fitness(self.population)
if self.variator is None:
self.variator = default_variator(self.problem)
def iterate(self):
offspring = []
while len(offspring) < self.population_size:
parents = self.selection.select(self.variator.arity, self.population)
offspring.extend(self.variator.evolve(parents))
self.evaluate_all(offspring)
offspring.extend(self.population)
self._assign_fitness(offspring)
self.population = self._truncate(offspring, self.population_size)
class MOEAD(AbstractGeneticAlgorithm):
def __init__(self, problem,
population_size=100,
neighborhood_size=10,
generator=RandomGenerator(),
variator=None,
delta=0.8,
eta=1,
update_utility=None,
weight_generator=random_weights,
scalarizing_function=chebyshev,
**kwargs):
super(MOEAD, self).__init__(problem, population_size, generator, **kwargs)
self.neighborhood_size = neighborhood_size
self.variator = variator
self.delta = delta
self.eta = eta
self.update_utility = update_utility
self.weight_generator = weight_generator
self.scalarizing_function = scalarizing_function
self.generation = 0
def _update_ideal(self, solution):
for i in range(self.problem.nobjs):
self.ideal_point[i] = min(self.ideal_point[i], solution.objectives[i])
def _calculate_fitness(self, solution, weights):
objs = solution.objectives
normalized_objs = [objs[i] - self.ideal_point[i] for i in range(self.problem.nobjs)]
return self.scalarizing_function(normalized_objs, weights)
def _update_solution(self, solution, mating_indices):
c = 0
random.shuffle(mating_indices)
for i in mating_indices:
candidate = self.population[i]
weights = self.weights[i]
replace = False
if solution.constraint_violation > 0.0 and candidate.constraint_violation > 0.0:
if solution.constraint_violation < candidate.constraint_violation:
replace = True
elif candidate.constraint_violation > 0.0:
replace = True
elif solution.constraint_violation > 0.0:
pass
elif self._calculate_fitness(solution, weights) < self._calculate_fitness(candidate, weights):
replace = True
if replace:
self.population[i] = solution
c = c + 1
if c >= self.eta:
break
def _sort_weights(self, base, weights):
"""Returns the index of weights nearest to the base weight."""
def compare(weight1, weight2):
dist1 = math.sqrt(sum([math.pow(base[i] - weight1[1][i], 2.0) for i in range(len(base))]))
dist2 = math.sqrt(sum([math.pow(base[i] - weight2[1][i], 2.0) for i in range(len(base))]))
if dist1 < dist2:
return -1
elif dist1 > dist2:
return 1
else:
return 0
sorted_weights = sorted(enumerate(weights), key=functools.cmp_to_key(compare))
return [i[0] for i in sorted_weights]
def initialize(self):
self.population = []
# initialize weights
self.weights = random_weights(self.population_size, self.problem.nobjs)
# initialize the neighborhoods based on weights
self.neighborhoods = []
for i
# Repository: xzluo97/MvMM-RegNet
# -*- coding: utf-8 -*-
"""
Network architectures for medical image registration.
@author: <NAME>
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from core.layers_2d import *
from collections import OrderedDict
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
def create_ddf_label_net(target, atlases, dropout_rate,
train_phase=True, regularizer=None, normalizer=None,
features_root=16, filter_size=3,
pool_size=2, num_down_blocks=4, ddf_levels=None,
trainable=True, summaries=False, verbose=True,
logger=logging, **kwargs):
"""
Create a network for the prediction of the dense displacement fields between each atlas and the target image with the
given parametrization.
:param target: The input target images of shape [n_batch, *vol_shape, n_channel].
:param atlases: The input probabilistic atlases of shape [n_batch, *vol_shape, n_atlas, n_channel].
:param dropout_rate: Dropout probability.
:param train_phase: Whether it is in training or inference mode.
:param regularizer: Type of regularizer applied to the kernel weights.
:param normalizer: type of normalization to use, default is None,
choose from None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
:param gap_filling: Whether to use gap-filling proposed in:
<NAME>, <NAME>, <NAME>, and <NAME>, “BIRNet: Brain Image Registration Using Dual-Supervised Fully
Convolutional Networks,” Med. Image Anal., vol. 54, pp. 193–206, May 2018.
:param num_filling_blocks:
:param features_root: The number of feature maps of the first convolution layer.
:param filter_size: The size of the convolution filter.
:param pool_size: The size of pooling window of the max pooling layer.
:param num_down_blocks: The number of downside convolution blocks.
:param ddf_levels: The levels of network to produce ddf summands.
:param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES
:param summaries: Flag if summaries should be created.
:param verbose: If true, print the network architecture settings.
:param logger: The logging module with specified configuration.
:returns: output_ddfs - The dense displacement fields that register every atlas to the target image, of shape
[n_batch, *vol_shape, n_atlas, 2], returned together with the mean regularization loss of the network.
"""
ddf_levels = list(range(num_down_blocks + 1)) if ddf_levels is None else list(ddf_levels)
vol_shape = target.get_shape().as_list()[1:3]
n_atlas = atlases.get_shape().as_list()[-2]
gap_filling = kwargs.pop('gap_filling', False)
dropout_type = kwargs.pop('dropout_type', 'regular')
# regularization losses
regularization_loss = []
if verbose:
logger.info("Convolutional network for deformable registration with parameterization: "
"features root: {features}, filter size: {filter_size}x{filter_size}x{filter_size}, "
"pool size: {pool_size}x{pool_size}x{pool_size}, "
"number of down-conv blocks: {num_dw_blocks}, "
"ddf_levels: {ddf_levels}, "
"normalizer: {normalizer}, "
"dropout type: {dropout_type}".format(features=features_root, filter_size=filter_size,
pool_size=pool_size, num_dw_blocks=num_down_blocks,
ddf_levels=ddf_levels, normalizer=normalizer,
dropout_type=dropout_type))
def forward(inputs):
regularization_loss = 0.
with tf.variable_scope('encoder'):
hiddens = OrderedDict() # Intermediate inputs of each down-sampling layer.
# down layers
hiddens[0], loss = conv_block_layer(inputs, num_layers=1, filter_size=7, feature_size=features_root,
regularizer=regularizer, normalizer=normalizer, train_phase=train_phase,
trainable=trainable, name_or_scope='hidden_0')
regularization_loss += loss
for layer in range(num_down_blocks):
dw_h_conv, loss = residual_block_layer(hiddens[layer], filter_size=filter_size,
feature_size=features_root * 2 ** layer,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
dropout_rate=dropout_rate, dropout_type=dropout_type,
name_or_scope='down_hidden_layer_%s' % layer)
regularization_loss += loss
hiddens[layer + 1], loss = transition_block_layer(dw_h_conv, pool_size=pool_size,
filter_size=filter_size, compression_rate=2,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
name_or_scope='transition_down_layer_%s' % layer)
regularization_loss += loss
uppers = OrderedDict() # Intermediate inputs of each up-sampling layer.
with tf.variable_scope('decoder'):
# up layers
uppers[num_down_blocks] = hiddens[num_down_blocks]
for layer in range(num_down_blocks - 1, -1, -1):
up_h_conv, loss = residual_additive_upsample(uppers[layer + 1], filter_size=filter_size,
strides=pool_size, feature_size=features_root * 2 ** layer,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
name_or_scope='additive_upsample_layer_%s' % layer)
regularization_loss += loss
# skip-connection whether to use the gap-filling strategy
if gap_filling:
num_filling_blocks = kwargs.pop('num_filling_blocks', (2, 1))
skip_features = hiddens[layer]
try:
gaps = OrderedDict()
for k in range(num_filling_blocks[layer]):
gaps[(layer, k)], loss = residual_block_layer(skip_features, filter_size=filter_size,
feature_size=features_root * 2 ** layer,
num_layers=2, regularizer=regularizer,
normalizer=normalizer, train_phase=train_phase,
trainable=trainable,
name_or_scope='gap_layer_%s_block_%s' % (layer, k))
regularization_loss += loss
skip_features = gaps[(layer, k)]
except IndexError:
pass
skip_connect = tf.add(up_h_conv, skip_features, name='skip_connect')
else:
skip_connect = tf.add(up_h_conv, hiddens[layer], name='skip_connect')
uppers[layer], loss = residual_block_layer(skip_connect, filter_size=filter_size,
feature_size=features_root * 2 ** layer,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
dropout_rate=dropout_rate, dropout_type=dropout_type,
name_or_scope='up_hidden_layer_%s' % layer)
regularization_loss += loss
if summaries:
for k, v in hiddens.items():
tf.summary.histogram("dw_h_convs_%s" % k, v)
for k, v in uppers.items():
tf.summary.histogram("up_h_convs_%s" % k, v)
return uppers, regularization_loss
output_ddfs = []
for i in range(n_atlas):
with tf.variable_scope('compute_ddfs', reuse=i != 0):
inputs = tf.concat([target, atlases[..., i, :]], axis=-1)
uppers, loss = forward(inputs)
level_ddf = []
for idx in ddf_levels:
ddf, l = conv_upsample(uppers[idx], 2 ** idx, filter_size=filter_size,
feature_size=2, regularizer=regularizer,
trainable=trainable, name_or_scope='conv_upsample_ddf_%s' % idx)
loss += l
level_ddf.append(ddf)
regularization_loss.append(loss)
output_ddfs.append(tf.reduce_sum(tf.stack(level_ddf), axis=0, name='output_ddf_sum'))
return tf.stack(output_ddfs, axis=-2, name='output_ddfs'), tf.reduce_mean(regularization_loss)
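# A small, hedged numpy sketch (not part of the original model code) of the multi-level
# DDF composition used above: each level yields a displacement summand at 1/2**level of
# the full resolution, which is upsampled and summed, mirroring
# tf.reduce_sum(tf.stack(level_ddf), axis=0). Nearest-neighbour upsampling stands in here
# for the learned conv_upsample layers.
def _compose_ddf_levels_example():
    import numpy as np
    vol_shape = (64, 64)
    level_ddfs = [np.random.randn(1, vol_shape[0] // 2 ** k, vol_shape[1] // 2 ** k, 2)
                  for k in range(3)]
    def nearest_upsample(x, factor):
        # Repeat along the two spatial axes to reach full resolution.
        return x.repeat(factor, axis=1).repeat(factor, axis=2)
    full_res = [nearest_upsample(ddf, 2 ** k) for k, ddf in enumerate(level_ddfs)]
    output_ddf = np.sum(np.stack(full_res), axis=0)  # shape (1, 64, 64, 2)
    return output_ddf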
def create_ddf_score_net(target, atlases, dropout_rate,
train_phase=True, regularizer=None, normalizer=None,
features_root=16, filter_size=3,
pool_size=2, num_down_blocks=4, ddf_levels=None,
trainable=True, summaries=False, verbose=True,
logger=logging, **kwargs):
"""
Create a network for the prediction of the dense displacement fields between each atlas and the target image with the
given parametrization.
:param target: The input target images of shape [n_batch, *vol_shape, n_channel].
:param atlases: The input probabilistic atlases of shape [n_batch, *vol_shape, n_atlas, n_channel].
:param dropout_rate: Dropout probability.
:param train_phase: Whether it is in training or inference mode.
:param regularizer: Type of regularizer applied to the kernel weights.
:param normalizer: type of normalization to use, default is None,
choose from None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
:param gap_filling: Whether to use gap-filling proposed in:
<NAME>, <NAME>, <NAME>, and <NAME>, “BIRNet: Brain Image Registration Using Dual-Supervised Fully
Convolutional Networks,” Med. Image Anal., vol. 54, pp. 193–206, May 2018.
:param num_filling_blocks:
:param features_root: The number of feature maps of the first convolution layer.
:param filter_size: The size of the convolution filter.
:param pool_size: The size of pooling window of the max pooling layer.
:param num_down_blocks: The number of downside convolution blocks.
:param ddf_levels: The levels of network to produce ddf summands.
:param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES
:param summaries: Flag if summaries should be created.
:param verbose: If true, print the network architecture settings.
:param logger: The logging module with specified configuration.
:returns: output_ddfs - The dense displacement fields that register every atlas to the target image, of shape
[n_batch, *vol_shape, n_atlas, 2].
output_scores - A tensor of per-atlas scores of shape [n_batch, n_atlas, 1]
"""
ddf_levels = list(range(num_down_blocks + 1)) if ddf_levels is None else list(ddf_levels)
vol_shape = target.get_shape().as_list()[1:3]
n_atlas = atlases.get_shape().as_list()[-2]
gap_filling = kwargs.pop('gap_filling', False)
dropout_type = kwargs.pop('dropout_type', 'regular')
# regularization losses
regularization_loss = []
if verbose:
logger.info("Convolutional network for deformable registration with parameterization: "
"features root: {features}, filter size: {filter_size}x{filter_size}x{filter_size}, "
"pool size: {pool_size}x{pool_size}x{pool_size}, "
"number of down-conv blocks: {num_dw_blocks}, "
"ddf_levels: {ddf_levels}, "
"normalizer: {normalizer}, "
"dropout type: {dropout_type}".format(features=features_root, filter_size=filter_size,
pool_size=pool_size, num_dw_blocks=num_down_blocks,
ddf_levels=ddf_levels, normalizer=normalizer,
dropout_type=dropout_type))
def forward(inputs):
regularization_loss = 0.
with tf.variable_scope('encoder'):
hiddens = OrderedDict() # Intermediate inputs of each down-sampling layer.
# down layers
hiddens[0], loss = conv_block_layer(inputs, num_layers=1, filter_size=7, feature_size=features_root,
regularizer=regularizer, normalizer=normalizer, train_phase=train_phase,
trainable=trainable, name_or_scope='hidden_0')
regularization_loss += loss
for layer in range(num_down_blocks):
dw_h_conv, loss = residual_block_layer(hiddens[layer], filter_size=filter_size,
feature_size=features_root * 2 ** layer,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
dropout_rate=dropout_rate, dropout_type=dropout_type,
name_or_scope='down_hidden_layer_%s' % layer)
regularization_loss += loss
hiddens[layer + 1], loss = transition_block_layer(dw_h_conv, pool_size=pool_size,
filter_size=filter_size, compression_rate=2,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
name_or_scope='transition_down_layer_%s' % layer)
regularization_loss += loss
with tf.variable_scope('fully_connected_layer'):
x = hiddens[num_down_blocks] # [n_batch, nx, ny, n_feature]
x_mean = tf.reduce_mean(x, axis=[1, 2], name='global_pooling')
x_dense = tf.keras.layers.Dense(units=features_root, activation=tf.nn.leaky_relu, name='dense')(x_mean)
y = tf.keras.layers.Dense(units=1, name='output')(x_dense)
score = tf.log(1+tf.exp(y), name='score')
uppers = OrderedDict() # Intermediate inputs of each up-sampling layer.
with tf.variable_scope('decoder'):
# up layers
uppers[num_down_blocks] = hiddens[num_down_blocks]
for layer in range(num_down_blocks - 1, -1, -1):
up_h_conv, loss = residual_additive_upsample(uppers[layer + 1], filter_size=filter_size,
strides=pool_size, feature_size=features_root * 2 ** layer,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
name_or_scope='additive_upsample_layer_%s' % layer)
regularization_loss += loss
# skip-connection whether to use the gap-filling strategy
if gap_filling:
num_filling_blocks = kwargs.pop('num_filling_blocks', (2, 1))
skip_features = hiddens[layer]
try:
gaps = OrderedDict()
for k in range(num_filling_blocks[layer]):
gaps[(layer, k)], loss = residual_block_layer(skip_features, filter_size=filter_size,
feature_size=features_root * 2 ** layer,
num_layers=2, regularizer=regularizer,
normalizer=normalizer, train_phase=train_phase,
trainable=trainable,
name_or_scope='gap_layer_%s_block_%s' % (layer, k))
regularization_loss += loss
skip_features = gaps[(layer, k)]
except IndexError:
pass
skip_connect = tf.add(up_h_conv, skip_features, name='skip_connect')
else:
skip_connect = tf.add(up_h_conv, hiddens[layer], name='skip_connect')
uppers[layer], loss = residual_block_layer(skip_connect, filter_size=filter_size,
feature_size=features_root * 2 ** layer,
regularizer=regularizer, normalizer=normalizer,
train_phase=train_phase, trainable=trainable,
dropout_rate=dropout_rate, dropout_type=dropout_type,
name_or_scope='up_hidden_layer_%s' % layer)
regularization_loss += loss
if summaries:
for k, v in hiddens.items():
tf.summary.histogram("dw_h_convs_%s" % k, v)
for k, v in uppers.items():
tf.summary.histogram("up_h_convs_%s" % k, v)
return uppers, regularization_loss, score
output_ddfs = []
output_scores = []
for i in range(n_atlas):
with tf.variable_scope('compute_ddfs', reuse=i != 0):
inputs = tf.concat([target,
단축코드)
self.ActiveX.SetFieldData(self.INBLOCK, "gubun", 0, 작업구분)
self.ActiveX.SetFieldData(self.INBLOCK, "time", 0, 시간)
self.ActiveX.SetFieldData(self.INBLOCK, "cnt", 0, 건수)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "cts_time", 0, 시간)
err_code = self.ActiveX.Request(True) # True only for a continuation (follow-up) query
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in range(nCount):
시간CTS = self.ActiveX.GetFieldData(self.OUTBLOCK, "cts_time", i).strip()
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
시간 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "chetime", i).strip()
종가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "close", i).strip())
전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
전일대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i).strip())
등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i).strip())
체결강도 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "chdegree", i).strip())
매도체결수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "mdvolume", i).strip())
매수체결수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "msvolume", i).strip())
순매수체결량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "revolume", i).strip())
매도체결건수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "mdchecnt", i).strip())
매수체결건수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "mschecnt", i).strip())
순체결건수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "rechecnt", i).strip())
거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
시가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "open", i).strip())
고가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "high", i).strip())
저가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "low", i).strip())
체결량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "cvolume", i).strip())
매도체결건수시간 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "mdchecnttm", i).strip())
매수체결건수시간 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "mschecnttm", i).strip())
매도잔량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "totofferrem", i).strip())
매수잔량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "totbidrem", i).strip())
시간별매도체결량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "mdvolumetm", i).strip())
시간별매수체결량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "msvolumetm", i).strip())
lst = [시간, 종가, 전일대비구분, 전일대비, 등락율, 체결강도, 매도체결수량, 매수체결수량, 순매수체결량, 매도체결건수, 매수체결건수, 순체결건수, 거래량, 시가, 고가, 저가, 체결량,
매도체결건수시간, 매수체결건수시간, 매도잔량, 매수잔량, 시간별매도체결량, 시간별매수체결량]
result.append(lst)
columns = ['시간', '종가', '전일대비구분', '전일대비', '등락율', '체결강도', '매도체결수량', '매수체결수량', '순매수체결량', '매도체결건수', '매수체결건수',
'순체결건수', '거래량', '시가', '고가', '저가', '체결량', '매도체결건수시간', '매수체결건수시간', '매도잔량', '매수잔량', '시간별매도체결량',
'시간별매수체결량']
df = DataFrame(data=result, columns=columns)
if self.parent != None:
self.parent.OnReceiveData(szTrCode, [시간CTS, df])
# Price history by period (daily/weekly/monthly)
class t1305(XAQuery):
def Query(self, 단축코드='',일주월구분='1',날짜='',IDX='',건수='900', 연속조회=False):
if 연속조회 == False:
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK, "shcode", 0, 단축코드)
self.ActiveX.SetFieldData(self.INBLOCK, "dwmcode", 0, 일주월구분)
self.ActiveX.SetFieldData(self.INBLOCK, "date", 0, 날짜)
self.ActiveX.SetFieldData(self.INBLOCK, "idx", 0, IDX)
self.ActiveX.SetFieldData(self.INBLOCK, "cnt", 0, 건수)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "date", 0, 날짜)
self.ActiveX.SetFieldData(self.INBLOCK, "idx", 0, IDX)
self.ActiveX.SetFieldData(self.INBLOCK, "cnt", 0, 건수)
err_code = self.ActiveX.Request(True) # True only for a continuation (follow-up) query
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in range(nCount):
CNT = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "cnt", i).strip())
날짜 = self.ActiveX.GetFieldData(self.OUTBLOCK, "date", i).strip()
IDX = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "idx", i).strip())
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
날짜 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "date", i).strip()
시가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "open", i).strip())
고가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "high", i).strip())
저가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "low", i).strip())
종가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "close", i).strip())
전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
전일대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i).strip())
등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i).strip())
누적거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
거래증가율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff_vol", i).strip())
체결강도 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "chdegree", i).strip())
소진율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "sojinrate", i).strip())
회전율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "changerate", i).strip())
외인순매수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "fpvolume", i).strip())
기관순매수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "covolume", i).strip())
종목코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "shcode", i).strip()
누적거래대금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value", i).strip())
개인순매수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "ppvolume", i).strip())
시가대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "o_sign", i).strip()
시가대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "o_change", i).strip())
시가기준등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "o_diff", i).strip())
고가대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "h_sign", i).strip()
고가대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "h_change", i).strip())
고가기준등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "h_diff", i).strip())
저가대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "l_sign", i).strip()
저가대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "l_change", i).strip())
저가기준등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "l_diff", i).strip())
시가총액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "marketcap", i).strip())
lst = [날짜, 시가, 고가, 저가, 종가, 전일대비구분, 전일대비, 등락율, 누적거래량, 거래증가율, 체결강도, 소진율, 회전율, 외인순매수, 기관순매수, 종목코드, 누적거래대금,
개인순매수, 시가대비구분, 시가대비, 시가기준등락율, 고가대비구분, 고가대비, 고가기준등락율, 저가대비구분, 저가대비, 저가기준등락율, 시가총액]
result.append(lst)
columns = ['날짜', '시가', '고가', '저가', '종가', '전일대비구분', '전일대비', '등락율', '누적거래량', '거래증가율', '체결강도', '소진율', '회전율',
'외인순매수', '기관순매수', '종목코드', '누적거래대금', '개인순매수', '시가대비구분', '시가대비', '시가기준등락율', '고가대비구분', '고가대비',
'고가기준등락율', '저가대비구분', '저가대비', '저가기준등락율', '시가총액']
df = DataFrame(data=result, columns=columns)
if self.parent is not None:
self.parent.OnReceiveData(szTrCode, [CNT, 날짜, IDX, df])
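# Note added for clarity (not in the original source): every TR wrapper below follows the same
# two-phase request pattern. The first call (연속조회=False) loads the .res layout and fills every
# input field before calling Request(0); a continuation call (연속조회=True) only re-sets the paging
# keys returned by the previous OnReceiveData (here 날짜 and IDX) and calls Request(True).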
# Top stocks by trading volume (거래량상위)
class t1452(XAQuery):
def Query(self, 구분='0',전일구분='',시작등락율='',종료등락율='',대상제외='',시작가격='',종료가격='',거래량='',IDX='',연속조회=False):
if 연속조회 == False:
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK, "gubun", 0, 구분)
self.ActiveX.SetFieldData(self.INBLOCK, "jnilgubun", 0, 전일구분)
self.ActiveX.SetFieldData(self.INBLOCK, "sdiff", 0, 시작등락율)
self.ActiveX.SetFieldData(self.INBLOCK, "ediff", 0, 종료등락율)
self.ActiveX.SetFieldData(self.INBLOCK, "jc_num", 0, 대상제외)
self.ActiveX.SetFieldData(self.INBLOCK, "sprice", 0, 시작가격)
self.ActiveX.SetFieldData(self.INBLOCK, "eprice", 0, 종료가격)
self.ActiveX.SetFieldData(self.INBLOCK, "volume", 0, 거래량)
self.ActiveX.SetFieldData(self.INBLOCK, "idx", 0, IDX)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "idx", 0, IDX)
err_code = self.ActiveX.Request(True)  # True only for continuation (paged) queries
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in range(nCount):
IDX = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "idx", i).strip())
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
종목명 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "hname", i).strip()
현재가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "price", i).strip())
전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
전일대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i).strip())
등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i).strip())
누적거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
회전율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "vol", i).strip())
전일거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "jnilvolume", i).strip())
전일비 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "bef_diff", i).strip())
종목코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "shcode", i).strip()
lst = [종목명,현재가,전일대비구분,전일대비,등락율,누적거래량,회전율,전일거래량,전일비,종목코드]
result.append(lst)
columns = ['종목명','현재가','전일대비구분','전일대비','등락율','누적거래량','회전율','전일거래량','전일비','종목코드']
df = DataFrame(data=result, columns=columns)
if self.parent is not None:
self.parent.OnReceiveData(szTrCode, [IDX, df])
# Top stocks by trading value (거래대금상위)
class t1463(XAQuery):
def Query(self, 구분='0',전일구분='',대상제외='',시작가격='',종료가격='',거래량='',IDX='',대상제외2='',연속조회=False):
if 연속조회 == False:
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK, "gubun", 0, 구분)
self.ActiveX.SetFieldData(self.INBLOCK, "jnilgubun", 0, 전일구분)
self.ActiveX.SetFieldData(self.INBLOCK, "jc_num", 0, 대상제외)
self.ActiveX.SetFieldData(self.INBLOCK, "sprice", 0, 시작가격)
self.ActiveX.SetFieldData(self.INBLOCK, "eprice", 0, 종료가격)
self.ActiveX.SetFieldData(self.INBLOCK, "volume", 0, 거래량)
self.ActiveX.SetFieldData(self.INBLOCK, "idx", 0, IDX)
self.ActiveX.SetFieldData(self.INBLOCK, "jc_num2", 0, 대상제외2)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "idx", 0, IDX)
err_code = self.ActiveX.Request(True)  # True only for continuation (paged) queries
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in range(nCount):
IDX = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "idx", i).strip())
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
한글명 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "hname", i).strip()
현재가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "price", i).strip())
전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
전일대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i).strip())
등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i).strip())
누적거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
거래대금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value", i).strip())
전일거래대금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "jnilvalue", i).strip())
전일비 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "bef_diff", i).strip())
종목코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "shcode", i).strip()
filler = self.ActiveX.GetFieldData(self.OUTBLOCK1, "filler", i).strip()
전일거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "jnilvolume", i).strip())
lst = [한글명, 현재가, 전일대비구분, 전일대비, 등락율, 누적거래량, 거래대금, 전일거래대금, 전일비, 종목코드, filler, 전일거래량]
result.append(lst)
columns = ['한글명', '현재가', '전일대비구분', '전일대비', '등락율', '누적거래량', '거래대금', '전일거래대금', '전일비', '종목코드', 'filler', '전일거래량']
df = DataFrame(data=result, columns=columns)
if self.parent is not None:
self.parent.OnReceiveData(szTrCode, [IDX, df])
# Sector index trend by period (업종기간별추이)
class t1514(XAQuery):
def Query(self, 업종코드='001',구분1='',구분2='1',CTS일자='',조회건수='100',비중구분='', 연속조회=False):
if 연속조회 == False:
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK, "upcode", 0, 업종코드)
self.ActiveX.SetFieldData(self.INBLOCK, "gubun1", 0, 구분1)
self.ActiveX.SetFieldData(self.INBLOCK, "gubun2", 0, 구분2)
self.ActiveX.SetFieldData(self.INBLOCK, "cts_date", 0, CTS일자)
self.ActiveX.SetFieldData(self.INBLOCK, "cnt", 0, 조회건수)
self.ActiveX.SetFieldData(self.INBLOCK, "rate_gbn", 0, 비중구분)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "cts_date", 0, CTS일자)
err_code = self.ActiveX.Request(True)  # True only for continuation (paged) queries
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in range(nCount):
CTS일자 = self.ActiveX.GetFieldData(self.OUTBLOCK, "cts_date", i).strip()
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
일자 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "date", i).strip()
지수 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "jisu", i))
전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
전일대비 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i))
등락율 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i))
거래량 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i))
거래증가율 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff_vol", i))
거래대금1 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value1", i))
상승 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "high", i))
보합 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "unchg", i))
하락 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "low", i))
상승종목비율 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "uprate", i))
외인순매수 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "frgsvolume", i))
시가 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "openjisu", i))
고가 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "highjisu", i))
저가 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "lowjisu", i))
거래대금2 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value2", i))
상한 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "up", i))
하한 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "down", i))
종목수 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "totjo", i))
기관순매수 = self.toint(self.ActiveX.GetFieldData(self.OUTBLOCK1, "orgsvolume", i))
업종코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "upcode", i).strip()
거래비중 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "rate", i))
업종배당수익률 = self.tofloat(self.ActiveX.GetFieldData(self.OUTBLOCK1, "divrate", i))
lst = [일자, 지수, 전일대비구분, 전일대비, 등락율, 거래량, 거래증가율, 거래대금1, 상승, 보합, 하락, 상승종목비율,
외인순매수, 시가, 고가, 저가, 거래대금2, 상한, 하한, 종목수, 기관순매수, 업종코드, 거래비중, 업종배당수익률]
result.append(lst)
columns = ['일자', '지수', '전일대비구분', '전일대비', '등락율', '거래량', '거래증가율', '거래대금1', '상승', '보합', '하락', '상승종목비율', '외인순매수',
'시가', '고가', '저가', '거래대금2', '상한', '하한', '종목수', '기관순매수', '업종코드', '거래비중', '업종배당수익률']
df = DataFrame(data=result, columns=columns)
if self.parent is not None:
self.parent.OnReceiveData(szTrCode, [CTS일자, df])
# Stock quotes by sector (업종별 종목시세)
# List of stocks in a sector (업종별종목 리스트)
class t1516(XAQuery):
def Query(self, 업종코드='001',구분='',종목코드='', 연속조회=False):
if 연속조회 == False:
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK, "upcode", 0, 업종코드)
self.ActiveX.SetFieldData(self.INBLOCK, "gubun", 0, 구분)
self.ActiveX.SetFieldData(self.INBLOCK, "shcode", 0, 종목코드)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "shcode", 0, 종목코드)
err_code = self.ActiveX.Request(True)  # True only for continuation (paged) queries
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in
"""
Common classes for service and gateway transformation; these make it easier to
construct the expected data map.
"""
from typing import Dict, List, Iterable, Sequence, Literal, Optional, Any, cast
from ..log import debug
from ..validation import validate_proxy_input
class HeaderQueryMatcher:
"""Matches a header value."""
__slots__ = ('name', 'match_type', 'case_sensitive', 'invert', 'match_value',)
def __init__(
self,
name: str, match_type: str,
case_sensitive: bool,
match_value: Optional[str],
invert: bool = False,
) -> None:
self.name = name
self.match_type = match_type
self.case_sensitive = case_sensitive
self.invert = invert
self.match_value = match_value or ''
def get_context(self) -> Dict[str, Any]:
"""Get the return context value."""
return {
'name': self.name,
'match': self.match_value,
'is_exact_match': self.match_type == 'exact',
'is_regex_match': self.match_type == 'regex',
'is_present_match': self.match_type == 'present',
'is_prefix_match': self.match_type == 'prefix',
'is_suffix_match': self.match_type == 'suffix',
'invert_match': self.invert, # this is ignored for query parameters...
'case_sensitive': self.case_sensitive,
}
def __repr__(self) -> str:
return repr(self.get_context())
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, HeaderQueryMatcher):
return False
return (
self.name == other.name
and self.match_type == other.match_type
and self.case_sensitive == other.case_sensitive
and self.invert == other.invert
and self.match_value == other.match_value
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return (
hash(self.name)
+ hash(self.match_type)
+ hash(self.case_sensitive)
+ hash(self.invert)
+ hash(self.match_value)
)
class RoutePathMatcher:
"""Matches the route path."""
__slots__ = ('path', 'path_type', 'case_sensitive',)
def __init__(self, path: str, path_type: str, case_sensitive: bool) -> None:
self.path = path
self.path_type = path_type
self.case_sensitive = case_sensitive
@property
def is_prefix(self) -> bool:
"""Is the path type a prefix?"""
return self.path_type == 'prefix'
@property
def is_exact(self) -> bool:
"""Is the path type exact?"""
return self.path_type == 'exact'
@property
def is_regex(self) -> bool:
"""Is the path type a regular expression?"""
return self.path_type == 'regex'
def __eq__(self, other: Any) -> bool:
if other is self:
return True
if not isinstance(other, RoutePathMatcher):
return False
return (
self.path == other.path
and self.path_type == other.path_type
and self.case_sensitive == other.case_sensitive
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return (
hash(self.path)
+ hash(self.path_type)
+ hash(self.case_sensitive)
)
class RouteMatcher:
"""Matches the route path, headers, and query parameters."""
__slots__ = ('path_matcher', 'header_matchers', 'query_matchers',)
def __init__(
self,
path_matcher: RoutePathMatcher,
header_matchers: Sequence[HeaderQueryMatcher],
query_matchers: Sequence[HeaderQueryMatcher],
) -> None:
self.path_matcher = path_matcher
self.header_matchers = tuple(header_matchers)
self.query_matchers = tuple(query_matchers)
def get_context(self) -> Dict[str, Any]:
"""Get the return context base set for this matcher."""
return {
'route_path': self.path_matcher.path,
'path_is_prefix': self.path_matcher.is_prefix,
'path_is_exact': self.path_matcher.is_exact,
'path_is_regex': self.path_matcher.is_regex,
'path_is_case_sensitive': self.path_matcher.case_sensitive,
'has_header_filters': len(self.header_matchers) > 0,
'header_filters': [matcher.get_context() for matcher in self.header_matchers],
'has_query_filters': len(self.query_matchers) > 0,
'query_filters': [matcher.get_context() for matcher in self.query_matchers],
}
def __repr__(self) -> str:
return repr(self.get_context())
# This is used as a dictionary key...
def __eq__(self, other: Any) -> bool:
if other is self:
return True
if not isinstance(other, RouteMatcher):
return False
return (
self.path_matcher == other.path_matcher
# order in the matchers doesn't matter?
and set(self.header_matchers) == set(other.header_matchers)
and set(self.query_matchers) == set(other.query_matchers)
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return (
hash(self.path_matcher)
+ hash(self.header_matchers)
+ hash(self.query_matchers)
)
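# Usage sketch (illustrative only; not part of the original module). Builds a matcher for a
# case-sensitive "/api" prefix with one exact header filter and shows the context dict handed
# to the template. All concrete values here are invented for the example.
_example_matcher = RouteMatcher(
    path_matcher=RoutePathMatcher(path='/api', path_type='prefix', case_sensitive=True),
    header_matchers=[HeaderQueryMatcher('x-env', 'exact', True, 'prod')],
    query_matchers=[],
)
# _example_matcher.get_context() yields route_path='/api', path_is_prefix=True,
# has_header_filters=True, and a single entry under 'header_filters'.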
class EnvoyRoute:
"""
Defines the mapping from a URL path match to one or more weighted clusters.
"""
__slots__ = (
'matcher', 'cluster_weights',
)
def __init__(
self,
matcher: RouteMatcher,
cluster_weights: Dict[str, int],
) -> None:
"""
Create a weighted route.
The cluster_weight is an association of cluster to the relative weight
of that cluster routing. If there are no cluster weights, then this
route will not be generated.
"local" routes are for connections between services within the same
mesh. Gateway proxies must always set this to False.
"""
self.matcher = matcher
self.cluster_weights = dict(cluster_weights)
@property
def total_weight(self) -> int:
"""The total cluster weight."""
return sum(self.cluster_weights.values())
def is_valid(self) -> bool:
"""Checks if this cluster is valid."""
for cluster, weight in self.cluster_weights.items():
if not cluster or weight <= 0:
return False
return True
def get_context(self) -> Optional[Dict[str, Any]]:
"""Get the JSON context data for this route."""
cluster_count = len(self.cluster_weights)
if cluster_count <= 0:
return None
ret = self.matcher.get_context()
ret.update({
'has_one_cluster': cluster_count == 1,
'has_many_clusters': cluster_count > 1,
'total_cluster_weight': self.total_weight,
'clusters': [{
'cluster_name': cn,
'route_weight': cw,
} for cn, cw in self.cluster_weights.items()],
})
return ret
class EnvoyListener:
"""
Defines a port listener in envoy, which corresponds to a namespace.
"""
__slots__ = ('port', 'routes',)
def __init__(self, port: Optional[int], routes: Iterable[EnvoyRoute]) -> None:
self.port = port
self.routes = list(routes)
def is_valid(self) -> bool:
"""Checks if this cluster is valid."""
if self.port is not None:
return 0 < self.port <= 65535
return True
def get_route_contexts(self) -> List[Dict[str, Any]]:
"""Get each route's JSON context data."""
ret: List[Dict[str, Any]] = []
for route in self.routes:
ctx = route.get_context()
if ctx:
ret.append(ctx)
return ret
def get_context(self) -> Dict[str, Any]:
"""Get the JSON context for this listener, including its routes."""
return {
'has_mesh_port': self.port is not None,
'mesh_port': self.port,
'routes': self.get_route_contexts(),
}
HostFormat = Literal['ipv4', 'ipv6', 'hostname']
class EnvoyClusterEndpoint:
"""
An endpoint within an envoy cluster.
"""
__slots__ = ('host', 'port', 'host_format',)
def __init__(self, host: str, port: int, host_format: HostFormat) -> None:
self.host = host
self.port = port
self.host_format = host_format
def is_valid(self) -> bool:
"""Checks whether the configuration is valid."""
# Right now, only ipv4 is supported in the proxy input schema.
return self.host_format == 'ipv4' and 0 < self.port <= 65535
def get_context(self) -> Dict[str, Any]:
"""Create a json context"""
return {
'host': self.host,
'port': self.port,
}
def __eq__(self, other: Any) -> bool:
if not isinstance(other, EnvoyClusterEndpoint):
return False
return (
self.host == other.host
and self.port == other.port
and self.host_format == other.host_format
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
# host_format is implied by host and port, so it does not need to be hashed separately.
return hash(self.host) + hash(self.port)
ClusterEndpointHostType = Literal['hostname', 'ipv4', 'ipv6']
class EnvoyCluster:
"""
Defines a cluster within envoy. It's already been weighted according to the path.
"""
__slots__ = ('cluster_name', 'uses_http2', 'instances', 'host_type',)
def __init__(
self,
cluster_name: str,
uses_http2: bool,
host_type: ClusterEndpointHostType,
instances: Iterable[EnvoyClusterEndpoint],
) -> None:
self.cluster_name = cluster_name
self.uses_http2 = uses_http2
self.host_type = host_type
self.instances = list(instances)
def is_valid(self) -> bool:
"""Checks if this cluster is valid."""
for instance in self.instances:
if not instance.is_valid():
return False
return True
def endpoint_count(self) -> int:
"""Count the number of endpoints."""
return len(self.instances)
def get_context(self) -> Dict[str, Any]:
"""Get the JSON context for this cluster."""
instances = self.instances
if not instances:
# We need something here, otherwise the route will say the cluster doesn't exist.
debug(
"No instances known for cluster {c}; creating temporary one.",
c=self.cluster_name,
)
return {
'name': self.cluster_name,
'uses_http2': self.uses_http2,
'hosts_are_ipv4': self.host_type == 'ipv4',
'hosts_are_ipv6': self.host_type == 'ipv6',
'hosts_are_hostname': self.host_type == 'hostname',
'endpoints': [
inst.get_context()
for inst in instances
],
}
class EnvoyConfig:
"""An entire configuration data schema for use to import into a mustache template."""
__slots__ = ('listeners', 'clusters',)
def __init__(
self,
listeners: Iterable[EnvoyListener],
clusters: Iterable[EnvoyCluster],
) -> None:
self.listeners = list(listeners)
self.clusters = list(clusters)
def is_valid(self) -> bool:
"""Checks whether this configuration is valid or not."""
if not self.listeners or not self.clusters:
return False
for listener in self.listeners:
if not listener.is_valid():
return False
for cluster in self.clusters:
if not cluster.is_valid():
return False
return True
def get_context(
self, network_name: str, service_member: str,
admin_port: Optional[int],
) -> Dict[str, Any]:
"""Get the JSON context for this configuration."""
cluster_endpoint_count = sum([c.endpoint_count() for c in self.clusters])
return {
'network_name': network_name,
'service_member': service_member,
'has_admin_port': admin_port is not None,
'admin_port': admin_port,
'listeners': [lt.get_context() for lt in self.listeners],
'has_clusters': cluster_endpoint_count > 0,
'clusters': [c.get_context() for c in self.clusters],
}
class EnvoyConfigContext:
"""Configuration context for an envoy instance."""
__slots__ = ('config', 'network_id', 'service', 'admin_port',)
def __init__(
self, config: EnvoyConfig, network_id: str,
service: str, admin_port: Optional[int],
) -> None:
self.config = config
self.network_id = network_id
self.service = service
self.admin_port = admin_port
def get_context(self) -> Dict[str, Any]:
"""Get the JSON structure for the context. This is validated to be in the
correct format."""
ret = self.config.get_context(
self.network_id, self.service, self.admin_port,
)
ret['schema-version'] = 'v1'
return validate_proxy_input(ret)
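# Usage sketch (illustrative only; not part of the original module). Wires one route, one
# listener, and one cluster into an EnvoyConfig and renders the template context directly via
# EnvoyConfig.get_context(), skipping the schema validation that EnvoyConfigContext performs.
# All names, addresses, and ports below are invented for the example.
_cluster = EnvoyCluster(
    cluster_name='billing-svc',
    uses_http2=False,
    host_type='ipv4',
    instances=[EnvoyClusterEndpoint('10.0.0.12', 8080, 'ipv4')],
)
_route = EnvoyRoute(
    matcher=RouteMatcher(RoutePathMatcher('/billing', 'prefix', True), [], []),
    cluster_weights={'billing-svc': 100},
)
_config = EnvoyConfig(listeners=[EnvoyListener(15001, [_route])], clusters=[_cluster])
assert _config.is_valid()
_context = _config.get_context(network_name='mesh-1', service_member='billing', admin_port=9901)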
def is_protocol_http2(protocol: Optional[str]) -> bool:
"""Checks whether the protocol is http2."""
return protocol is not None and
(u'More Limits', u'rkeU8_4nzKo', u'more-limits'),
(u'Epsilon Delta Limit Definition 1',
u'-ejyeII0i5c',
u'epsilon-delta-limit-definition-1'),
(u'Epsilon Delta Limit Definition 2',
u'Fdu5-aNJTzU',
u'epsilon-delta-limit-definition-2'),
(u'Calculus Derivatives 1 (new HD version)',
u'ANyVpMS3HL4',
u'calculus--derivatives-1--new-hd-version'),
(u'Calculus Derivatives 2 (new HD version)',
u'IePCHjMeFkE',
u'calculus--derivatives-2--new-hd-version'),
(u'Calculus Derivatives 2.5 (new HD version)',
u'HEH_oKNLgUU',
u'calculus--derivatives-2-5--new-hd-version'),
(u'Calculus Derivatives 1',
u'rAof9Ld5sOg',
u'calculus--derivatives-1'),
(u'Calculus Derivatives 2',
u'ay8838UZ4nM',
u'calculus--derivatives-2'),
(u'Calculus Derivatives 3',
u'z1lwai-lIzY',
u'calculus--derivatives-3'),
(u'The Chain Rule', u'XIQ-KnsAsbg', u'the-chain-rule'),
(u'Chain Rule Examples',
u'6_lmiPDedsY',
u'chain-rule-examples'),
(u'Even More Chain Rule',
u'DYb-AN-lK94',
u'even-more-chain-rule'),
(u'Product Rule', u'h78GdGiRmpM', u'product-rule'),
(u'Quotient Rule', u'E_1gEtiGPNI', u'quotient-rule'),
(u'Derivatives (part 9)',
u'aEP4C_kvcO4',
u'derivatives--part-9'),
(u'Proof d/dx(x^n)', u'dZnc3PtNaN4', u'proof--d-dx-x-n'),
(u'Proof d/dx(sqrt(x))', u'789aMeepbxI', u'proof--d-dx-sqrt-x'),
(u'Proof d/dx(ln x) = 1/x',
u'yUpDRpkUhf4',
u'proof--d-dx-ln-x----1-x'),
(u'Proof d/dx(e^x) = e^x',
u'sSE6_fK3mu0',
u'proof--d-dx-e-x----e-x'),
(u'Proofs of Derivatives of Ln(x) and e^x',
u'3nQejB-XPoY',
u'proofs-of-derivatives-of-ln-x--and-e-x'),
(u'Extreme Derivative Word Problem (advanced)',
u'viaPc8zDcRI',
u'extreme-derivative-word-problem--advanced'),
(u'Implicit Differentiation',
u'sL6MC-lKOrw',
u'implicit-differentiation'),
(u'Implicit Differentiation (part 2)',
u'PUsMyhds5S4',
u'implicit-differentiation--part-2'),
(u'More implicit differentiation',
u'hrg1hCzg3W0',
u'more-implicit-differentiation'),
(u'More chain rule and implicit differentiation intuition',
u'XHBkQW_XuA4',
u'more-chain-rule-and-implicit-differentiation-intuition'),
(u'Trig Implicit Differentiation Example',
u'6xvwyE67CeM',
u'trig-implicit-differentiation-example'),
(u'Calculus Derivative of x^(x^x)',
u'N5kkwVoAtkc',
u'calculus--derivative-of-x--x-x'),
(u"Introduction to L'Hopital's Rule",
u'PdSzruR5OeE',
u'introduction-to-l-hopital-s-rule'),
(u"L'Hopital's Rule Example 1",
u'BiVOC3WocXs',
u'l-hopital-s-rule-example-1'),
(u"L'Hopital's Rule Example 2",
u'FJo18AwLfuI',
u'l-hopital-s-rule-example-2'),
(u"L'Hopital's Rule Example 3",
u'MeVFZjT-ABM',
u'l-hopital-s-rule-example-3'),
(u'Maxima Minima Slope Intuition',
u'tpHz0gZfVss',
u'maxima-minima-slope-intuition'),
(u'Inflection Points and Concavity Intuition',
u'dIE22eL6q90',
u'inflection-points-and-concavity-intuition'),
(u'Monotonicity Theorem',
u'WrEcQsa-1ME',
u'monotonicity-theorem'),
(u'Calculus Maximum and minimum values on an interval',
u'gzmSKrwiG3g',
u'calculus--maximum-and-minimum-values-on-an-interval'),
(u'Calculus Graphing Using Derivatives',
u'hIgnece9ins',
u'calculus--graphing-using-derivatives'),
(u'Calculus Graphing with Derivatives Example',
u'zC_dTaEY2AY',
u'calculus-graphing-with-derivatives-example'),
(u'Graphing with Calculus',
u'ojcp0GJKluM',
u'graphing-with-calculus'),
(u'Optimization with Calculus 1',
u'Ef22yTJDUZI',
u'optimization-with-calculus-1'),
(u'Optimization with Calculus 2',
u'3GYv-BZYYdg',
u'optimization-with-calculus-2'),
(u'Optimization with Calculus 3',
u'i8Wtu-kdDC4',
u'optimization-with-calculus-3'),
(u'Optimization Example 4',
u'T8sG4Sb3g7Y',
u'optimization--example-4'),
(u'Introduction to rate-of-change problems',
u'Zyq6TmQVBxk',
u'introduction-to-rate-of-change-problems'),
(u'Equation of a tangent line',
u'1KwW1v__T_0',
u'equation-of-a-tangent-line'),
(u'Rates-of-change (part 2)',
u'xmgk8_l3lig',
u'rates-of-change--part-2'),
(u'Ladder rate-of-change problem',
u'hD3U65CcZ0Q',
u'ladder-rate-of-change-problem'),
(u'Mean Value Theorem', u'bGNMXfaNR5Q', u'mean-value-theorem'),
(u'The Indefinite Integral or Anti-derivative',
u'xRspb-iev-g',
u'the-indefinite-integral-or-anti-derivative'),
(u'Indefinite integrals (part II)',
u'mHvSYRUEWnE',
u'indefinite-integrals--part-ii'),
(u'Indefinite Integration (part III)',
u'77-najNh4iY',
u'indefinite-integration--part-iii'),
(u'Indefinite Integration (part IV)',
u'VJ9VRUDQyK8',
u'indefinite-integration--part-iv'),
(u'Indefinite Integration (part V)',
u'Pra6r20geXU',
u'indefinite-integration--part-v'),
(u'Integration by Parts (part 6 of Indefinite Integration)',
u'ouYZiIh8Ctc',
u'integration-by-parts--part-6-of-indefinite-integration'),
(u'Indefinite Integration (part 7)',
u'F-OsMq7QKEQ',
u'indefinite-integration--part-7'),
(u'Another u-subsitution example',
u'IAbSeAk5RJU',
u'another-u-subsitution-example'),
(u'Introduction to definite integrals',
u'0RdI3-8G4Fs',
u'introduction-to-definite-integrals'),
(u'Definite integrals (part II)',
u'6PaFm_Je5A0',
u'definite-integrals--part-ii'),
(u'Definite Integrals (area under a curve) (part III)',
u'7wUHJ7JQ-gs',
u'definite-integrals--area-under-a-curve---part-iii'),
(u'Definite Integrals (part 4)',
u'11Bt6OhIeqA',
u'definite-integrals--part-4'),
(u'Definite Integrals (part 5)',
u'CmXmRNFrtFw',
u'definite-integrals--part-5'),
(u'Definite integral with substitution',
u'CbUx0S8BCtA',
u'definite-integral-with-substitution'),
(u'Integrals Trig Substitution 1',
u'n4EK92CSuBE',
u'integrals--trig-substitution-1'),
(u'Integrals Trig Substitution 2',
u'fD7MbnXbTls',
u'integrals--trig-substitution-2'),
(u'Integrals Trig Substitution 3 (long problem)',
u'sw2p2tUIFpc',
u'integrals--trig-substitution-3--long-problem'),
(u'Periodic Definite Integral',
u'CZdziIlYIfI',
u'periodic-definite-integral'),
(u'Introduction to differential equations',
u'C8mudsCSmcU',
u'introduction-to-differential-equations'),
(u'Solid of Revolution (part 1)',
u'R_aqSL-q6_8',
u'solid-of-revolution--part-1'),
(u'Solid of Revolution (part 2)',
u'iUzfsUOl3-A',
u'solid-of-revolution---part-2'),
(u'Solid of Revolution (part 3)',
u'tqfU9mC2yFU',
u'solid-of-revolution--part-3'),
(u'Solid of Revolution (part 4)',
u'OtmjNuiTHp0',
u'solid-of-revolution--part-4'),
(u'Solid of Revolution (part 5)',
u'NIdqkwocNuE',
u'solid-of-revolution--part-5'),
(u'Solid of Revolution (part 6)',
u'F2psxMnGdUw',
u'solid-of-revolution--part-6'),
(u'Solid of Revolution (part 7)',
u'IZ8W-h764Cc',
u'solid-of-revolution--part-7'),
(u'Solid of Revolution (part 8)',
u'4Flj9plmKGQ',
u'solid-of-revolution--part-8'),
(u'Sequences and Series (part 1)',
u'VgVJrSJxkDk',
u'sequences-and-series--part-1'),
(u'Sequences and series (part 2)',
u'U_8GRLJplZg',
u'sequences-and-series--part-2'),
(u'Polynomial approximation of functions (part 1)',
u'sy132cgqaiU',
u'polynomial-approximation-of-functions--part-1'),
(u'Polynomial approximation of functions (part 2)',
u'3JG3qn7-Sac',
u'polynomial-approximation-of-functions--part-2'),
(u'Approximating functions with polynomials (part 3)',
u'XZDGrbyz0v0',
u'approximating-functions-with-polynomials--part-3'),
(u'Polynomial approximation of functions (part 4)',
u'gcJeg4SdIpU',
u'polynomial-approximation-of-functions--part-4'),
(u'Polynomial approximations of functions (part 5)',
u'9AoDucUmO20',
u'polynomial-approximations-of-functions--part-5'),
(u'Polynomial approximation of functions (part 6)',
u'-gRNRBCG3Ow',
u'polynomial-approximation-of-functions--part-6'),
(u'Polynomial approximation of functions (part 7)',
u'bC5Lahh4Aus',
u'polynomial-approximation-of-functions--part-7'),
(u'Taylor Polynomials', u'8SsC5st4LnI', u'taylor-polynomials'),
(u'Exponential Growth', u'JWfTckls59k', u'exponential-growth'),
(u'AP Calculus BC Exams 2008 1 a',
u'upO6Mh862PI',
u'ap-calculus-bc-exams--2008-1-a'),
(u'AP Calculus BC Exams 2008 1 b&c',
u'xPb6HLM3xEQ',
u'ap-calculus-bc-exams--2008-1-b-c'),
(u'AP Calculus BC Exams 2008 1 c&d',
u'_l0Mfsu__gU',
u'ap-calculus-bc-exams--2008-1-c-d'),
(u'AP Calculus BC Exams 2008 1 d',
u'sPTuCE5zd3s',
u'ap-calculus-bc-exams--2008-1-d'),
(u'Calculus BC 2008 2 a',
u'xvvI_QRYxBY',
u'calculus-bc-2008-2-a'),
(u'Calculus BC 2008 2 b &c',
u'S4oOSgTj9C8',
u'calculus-bc-2008-2-b--c'),
(u'Calculus BC 2008 2d',
u'o_vMb655dFk',
u'calculus-bc-2008-2d'),
(u'Partial Derivatives',
u'1CMDS4-PKKQ',
u'partial-derivatives'),
(u'Partial Derivatives 2',
u'-u0mqFqpMNY',
u'partial-derivatives-2'),
(u'Gradient 1', u'U7HQ_G_N6vo', u'gradient-1'),
(u'Gradient of a scalar field',
u'OB8b8aDGLgE',
u'gradient-of-a-scalar-field'),
(u'Divergence 1', u'JAXyLhvZ-Vg', u'divergence-1'),
(u'Divergence 2', u'tOX3RkH2guE', u'divergence-2'),
(u'Divergence 3', u'U6Re4xT0o4w', u'divergence-3'),
(u'Curl 1', u'Mt4dpGFVsYc', u'curl-1'),
(u'Curl 2', u'hTSyVgBa1T0', u'curl-2'),
(u'Curl 3', u'fYzoiWIBjP8', u'curl-3'),
(u'Double Integral 1', u'85zGYB-34jQ', u'double-integral-1'),
(u'Double Integrals 2', u'TdLD2Zh-nUQ', u'double-integrals-2'),
(u'Double Integrals 3', u'z8BM6cHifPA', u'double-integrals-3'),
(u'Double Integrals 4', u'twT-WZChfZ8', u'double-integrals-4'),
(u'Double Integrals 5', u'hrIPO8mQqtw', u'double-integrals-5'),
(u'Double Integrals 6', u'0pv0QtOi5l8', u'double-integrals-6'),
(u'Triple Integrals 1', u'vr0sTKbV7lI', u'triple-integrals-1'),
(u'Triple Integrals 2', u'vxQvL_WhBGU', u'triple-integrals-2'),
(u'Triple Integrals 3', u'ZN2PfqZ4ihM', u'triple-integrals-3'),
(u'(2^ln x)/x Antiderivative Example',
u'C5Lbjbyr1t4',
u'2-ln-x--x--antiderivative-example'),
(u'Introduction to the Line Integral',
u'_60sKaoRmhU',
u'introduction-to-the-line-integral'),
(u'Line Integral Example 1',
u'uXjQ8yc9Pdg',
u'line-integral-example-1'),
(u'Line Integral Example 2 (part 1)',
u'wyTjyQMVvc4',
u'line-integral-example-2--part-1'),
(u'Line Integral Example 2 (part 2)',
u'Qqanbd3gLhw',
u'line-integral-example-2--part-2'),
(u'Position Vector Valued Functions',
u'sBldw95xMD4',
u'position-vector-valued-functions'),
(u'Derivative of a position vector valued function',
u'E9Q_Lc0g1xE',
u'derivative-of-a-position-vector-valued-function'),
(u'Differential of a vector valued function',
u'FYMn61HLw1k',
u'differential-of-a-vector-valued-function'),
(u'Vector valued function derivative example',
u'vcwvzUVLPw0',
u'vector-valued-function-derivative-example'),
(u'Line Integrals and Vector Fields',
u't3cJYNdQLYg',
u'line-integrals-and-vector-fields'),
(u'Using a line integral to find the work done by a vector field example',
u'AFF8FXxt5os',
u'using-a-line-integral-to-find-the-work-done-by-a-vector-field-example'),
(u'Parametrization of a Reverse Path',
u'eGRZKkmI_fo',
u'parametrization-of-a-reverse-path'),
(u'Scalar Field Line Integral Independent of Path Direction',
u'99pD1-6ZpuM',
u'scalar-field-line-integral-independent-of-path-direction'),
(u'Vector Field Line Integrals Dependent on Path Direction',
u'fuSOY9r1R6w',
u'vector-field-line-integrals-dependent-on-path-direction'),
(u'Path Independence for Line Integrals',
u'K_fgnCJOI8I',
u'path-independence-for-line-integrals'),
(u'Closed Curve Line Integrals of Conservative Vector Fields',
u'I2dbzp0zHuw',
u'closed-curve-line-integrals-of-conservative-vector-fields'),
(u'Example of Closed Line Integral of Conservative Field',
u'Q9t1LghwdGc',
u'example-of-closed-line-integral-of-conservative-field'),
(u'Second Example of Line Integral of Conservative Vector Field',
u'LpY8Qa3IP1w',
u'second-example-of-line-integral-of-conservative-vector-field'),
(u"Green's Theorem Proof Part 1",
u'l5zJvZKfMYE',
u'green-s-theorem-proof-part-1'),
(u"Green's Theorem Proof (part 2)",
u'qdFD-0OWBRo',
u'green-s-theorem-proof--part-2'),
(u"Green's Theorem Example 1",
u'gGXnILbrhsM',
u'green-s-theorem-example-1'),
(u"Green's Theorem Example 2",
u'sSyPAAyL8nQ',
u'green-s-theorem-example-2'),
(u'Introduction to Parametrizing a Surface with Two Parameters',
u'owKAHXf1y1A',
u'introduction-to-parametrizing-a-surface-with-two-parameters'),
(u'Determining a Position Vector-Valued Function for a Parametrization of Two Parameters',
u'bJ_09eoCmag',
u'determining-a-position-vector-valued-function-for-a-parametrization-of-two-parameters'),
(u'Partial Derivatives of Vector-Valued Functions',
u'c7ByaI3T7Dc',
u'partial-derivatives-of-vector-valued-functions'),
(u'Introduction to the Surface Integral',
u'9k97m8oWnaY',
u'introduction-to-the-surface-integral'),
(u'Example of calculating a surface integral part 1',
u'7sQCcGlK2bY',
u'example-of-calculating-a-surface-integral-part-1'),
(u'Example of calculating a surface integral part 2',
u'qQAhhithHa8',
u'example-of-calculating-a-surface-integral-part-2'),
(u'Example of calculating a surface integral part 3',
u's2_NTiISZl4',
u'example-of-calculating-a-surface-integral-part-3')],
'CaliforniaStandardsTestAlgebraI': [(u'CA Algebra I Number Properties and Absolute Value',
u'ZouQdHSyelg',
u'ca-algebra-i--number-properties-and-absolute-value'),
(u'CA Algebra I Simplifying Expressions',
u'Hfihqi82M4A',
u'ca-algebra-i--simplifying-expressions'),
(u'CA Algebra I Simple Logical Arguments',
u'yIzF_XGX4qk',
u'ca-algebra-i--simple-logical-arguments'),
(u'CA Algebra I Graphing Inequalities',
u'KZ8Vw_Nim8U',
u'ca-algebra-i--graphing-inequalities'),
(u'CA Algebra I Slope and Y-intercept',
u'31v-n2ND2VE',
u'ca-algebra-i--slope-and-y-intercept'),
(u'CA Algebra I Systems of Inequalities',
u'1piZ8oYWh3E',
u'ca-algebra-i--systems-of-inequalities'),
(u'CA Algebra I Simplying Expressions',
u'ri-0v6vqcKM',
u'ca-algebra-i--simplying-expressions'),
(u'CA Algebra I Factoring Quadratics',
u'K5ggNnKTmNM',
u'ca-algebra-i--factoring-quadratics'),
(u'CA Algebra I Completing the Square',
u'8M4c8TB3Cdc',
u'ca-algebra-i--completing-the-square'),
(u'CA Algebra I Quadratic Equation',
u'tSNtCg7o7bA',
u'ca-algebra-i--quadratic-equation'),
(u'CA Algebra I Quadratic Roots',
u'uA6mcx4FMN8',
u'ca-algebra-i--quadratic-roots'),
(u'CA Algebra I Rational Expressions',
u'K4VyHxglUts',
u'ca-algebra-i--rational-expressions'),
(u'CA Algebra I Rational Expressions',
u'C5xQP8RmHxE',
u'ca-algebra-i--rational-expressions'),
(u'CA Algebra I Word Problems',
u'PP23clmV9Hw',
u'ca-algebra-i--word-problems'),
(u'CA Algebra I More Word Problems',
u'ch5tDNaeuxc',
u'ca-algebra-i--more-word-problems'),
(u'CA Algebra I Functions',
u'NRB6s77nx2g',
u'ca-algebra-i--functions')],
'CaliforniaStandardsTestAlgebraII': [(u'California Standards Test Algebra II',
u'wzEVAd6ezZU',
u'california-standards-test--algebra-ii'),
(u'California Standards Test Algebra II (Graphing Inequalities',
u'UzvOjuJZVJ0',
u'california-standards-test--algebra-ii--graphing-inequalities'),
(u'CA Standards Algebra II (Algebraic Division/Multiplication)',
u'sMrmuoehZpY',
u'ca-standards--algebra-ii--algebraic-division-multiplication'),
(u'CA Standards Algebra II',
u'q9m5VZMYEyw',
u'ca-standards---algebra-ii'),
(u'Algebra II Simplifying Polynomials',
u'WB7gPfsv6rQ',
u'algebra-ii--simplifying-polynomials'),
(u'Algebra II Imaginary and Complex Numbers',
u'C-2Ln0pK3kY',
u'algebra-ii--imaginary-and-complex-numbers'),
(u'Algebra II Complex numbers and conjugates',
u'e3W8o6M-7gg',
u'algebra-ii--complex-numbers-and-conjugates'),
(u'Algebra II Quadratics and Shifts',
u'GHDrDdu6vrU',
u'algebra-ii--quadratics-and-shifts'),
(u'Algebra II Shifting Quadratic Graphs',
u'X9rTIwc1wRU',
u'algebra-ii--shifting-quadratic-graphs'),
(u'Algebra || Conic Sections',
u'74oju-0NExU',
u'algebra-----conic-sections'),
(u'Algebra II Circles and Logarithms',
u'QwOcCoHsZfM',
u'algebra-ii--circles-and-logarithms'),
(u'Algebra II Logarithms Exponential Growth',
u'BcjutHIUjxQ',
u'algebra-ii--logarithms-exponential-growth'),
(u'Algebra II Logarithms and more',
u'9Z1WpYN-tBE',
u'algebra-ii--logarithms-and-more'),
(u'Algebra II Functions. Combinatorics',
u'Y5BukhTmSHE',
u'algebra-ii--functions--combinatorics'),
(u'Algebra II binomial Expansion and Combinatorics',
u'xTxv9Wukjiw',
u'algebra-ii--binomial-expansion-and-combinatorics'),
(u'Algebra II Binomial Expansions. Geometric Series Sum',
u'EwKWzFv3Ul8',
u'algebra-ii--binomial-expansions--geometric-series-sum'),
(u'Algebra II Functions and Probability',
u'ZGJU7aqE3mY',
u'algebra-ii--functions-and-probability'),
(u'Algebra II Probability and Statistics',
u'UXO9kJ3jlhk',
u'algebra-ii--probability-and-statistics'),
(u'Algebra II Mean and Standard Deviation',
u'i0W4KcxE-mI',
u'algebra-ii--mean-and-standard-deviation')],
'CaliforniaStandardsTestGeometry': [(u'CA Geometry deductive reasoning',
u'GluohfOedQE',
u'ca-geometry--deductive-reasoning'),
(u'CA Geometry Proof by Contradiction',
u'u6O0YHyarlI',
u'ca-geometry--proof-by-contradiction'),
(u'CA Geometry More Proofs',
u'4PPMnI8-Zsc',
u'ca-geometry--more-proofs'),
(u'CA Geometry Similar Triangles',
u'bWTtHKSEcdI',
u'ca-geometry--similar-triangles'),
(u'CA Geometry Similar Triangles',
u'iOLN43V1Lmw',
u'ca-geometry--similar-triangles'),
(u'CA Geometry More on congruent and similar triangles',
u'FVSgVMVZZ-4',
u'ca-geometry--more-on-congruent-and-similar-triangles'),
(u'CA Geometry Triangles and Parallelograms',
u'h0FFEBHBufo',
u'ca-geometry--triangles-and-parallelograms'),
(u'CA Geometry Area. Pythagorean Theorem',
u'jRrRqMJbHKc',
u'ca-geometry--area--pythagorean-theorem'),
(u'CA Geometry Area. Circumference. Volume',
u'BJSk1joCQsM',
u'ca-geometry--area--circumference--volume'),
(u'CA Geometry Pythagorean Theorem. Area',
u'vaOXkt7uuac',
u'ca-geometry--pythagorean-theorem--area'),
(u'CA Geometry Exterior Angles',
u'Ncg1HB5uVLc',
u'ca-geometry--exterior-angles'),
(u'CA Geometry Deducing Angle Measures',
u'_HJljJuVHLw',
u'ca-geometry--deducing-angle-measures'),
(u'CA Geometry Pythagorean Theorem. Compass Constructions',
u'6EY0E3z-hsU',
u'ca-geometry--pythagorean-theorem--compass-constructions'),
(u'CA Geometry
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
import itertools
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
def empty_sparse(dtype, shape=None):
if shape is None:
shape = [0]
return (np.empty(shape=(0, len(shape)), dtype=np.int64),
np.array([], dtype=dtype), np.array(shape, dtype=np.int64))
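# Illustration (not part of the original tests): for a fixed 1-D shape the helper returns an
# empty (0, 1) indices array, an empty values array of the requested dtype, and the dense shape.
_idx, _vals, _shape = empty_sparse(np.float32, shape=[13])
assert _idx.shape == (0, 1) and _vals.size == 0 and list(_shape) == [13]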
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape] if isinstance(
v, sparse_tensor.SparseTensor) else [v] for v in tensors_list]))
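# Illustration (not part of the original tests): a SparseTensor expands into its three
# component tensors in the flattened list, while a dense Tensor contributes a single entry.
_st = sparse_tensor.SparseTensor(indices=[[0]], values=[1.0], dense_shape=[1])
_dense = ops.convert_to_tensor([2.0])
assert len(flatten_values_tensors_or_sparse([_st, _dense])) == 4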
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Also include a test with the example names specified to retain
# code coverage of the unfused version, and ensure that the two
# versions produce the same results.
out_with_example_name = parsing_ops.parse_single_example(
example_names="name", **kwargs)
for result_dict in [out, out_with_example_name]:
result = flatten_values_tensors_or_sparse(result_dict.values())
# Check values.
tf_result = self.evaluate(result)
_compare_output_to_expected(self, result_dict, expected_values,
tf_result)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(tuple(out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 1), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([0], dtype=np.int64)) # max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array([a_default]),
b_name: np.array(b_default),
c_name: np.array(c_default),
}
self._test({
"serialized": ops.convert_to_tensor(""),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature(
(2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
{
"serialized": original.SerializeToString(),
"features": input_features,
},
expected_err=(errors_impl.OpError,
"Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"serialized": "",
"features": input_features,
},
expected_err=(errors_impl.OpError,
"Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = example(features=features({
"a": float_feature([-1, -1]),
}))
serialized = original.SerializeToString()
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
# TODO(mrry): Consider matching the `io.parse_example()` error message.
expected_err=(errors_impl.OpError, "Key: a."))
def testDenseDefaultNoShapeShouldFail(self):
original = example(features=features({
"a": float_feature([1, 1, 3]),
}))
serialized = original.SerializeToString()
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
@test_util.run_deprecated_v1
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([]), # empty float list
})),
example(features=features({
"st_d": feature(), # feature with nothing in it
})),
example(features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
expected_outputs = [{
"st_c": (np.array([[0], [1]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([2], dtype=np.int64)),
"st_d":
empty_sparse(bytes)
}, {
"st_c": empty_sparse(np.float32),
"st_d": empty_sparse(bytes)
}, {
"st_c": empty_sparse(np.float32),
"st_d": empty_sparse(bytes)
}, {
"st_c": (np.array([[0], [1], [2]], dtype=np.int64),
np.array([1.0, 2.0, -1.0], dtype=np.float32),
np.array([3], dtype=np.int64)),
"st_d": (np.array([[0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
np.array([1], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
},
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
expected_outputs = [{
"sp": (np.array([[5], [10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13], dtype=np.int64))
}, {
"sp": empty_sparse(np.float32, shape=[13])
}, {
"sp": empty_sparse(np.float32, shape=[13])
}, {
"sp": (np.array([[0], [3], [9]], dtype=np.int64),
np.array([1.0, -1.0, 2.0], dtype=np.float32),
np.array([13], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32,
[13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
expected_outputs = [{
"sp1": (np.array([[5], [10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13], dtype=np.int64)),
"sp2": (np.array([[5], [10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32),
np.array([7], dtype=np.int64))
}, {
"sp1": empty_sparse(np.float32, shape=[13]),
"sp2": empty_sparse(np.float32, shape=[7])
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx",
"val2",
dtypes.float32,
size=7,
already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
expected_outputs = [{
"sp": (np.array([[5, 0], [10, 2]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13, 3], dtype=np.int64))
}, {
"sp": empty_sparse(np.float32, shape=[13, 3])
}, {
"sp": empty_sparse(np.float32, shape=[13, 3])
}, {
"sp": (np.array([[0, 1], [3, 2], [9, 0]], dtype=np.int64),
np.array([1.0, -1.0, 2.0], dtype=np.float32),
np.array([13, 3], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
}
}, expected_output)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})), example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
# pylint: disable=too-many-function-args
expected_outputs = [
{
aname:
np.array([1, 1], dtype=np.float32).reshape(1, 2, 1),
bname:
np.array(["b0_str"], dtype=bytes).reshape(
1, 1, 1, 1)
},
{
aname:
np.array([-1, -1], dtype=np.float32).reshape(1, 2, 1),
bname:
np.array([""], dtype=bytes).reshape(
1, 1, 1, 1)
}
]
# pylint: enable=too-many-function-args
for proto, expected_output in zip(original, expected_outputs):
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
# This test is identical to the previous one except
# for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})), example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),),
]
# pylint: disable=too-many-function-args
expected_outputs = [
{
aname:
np.array([1, 1], dtype=np.float32).reshape(1, 2, 1),
bname:
np.array(["b0_str"], dtype=bytes).reshape(
1, 1, 1, 1)
},
{
aname:
np.array([-1, -1], dtype=np.float32).reshape(1, 2, 1),
bname:
np.array(["b1"], dtype=bytes).reshape(
1, 1, 1, 1)
}
]
# pylint: enable=too-many-function-args
for (m, n), expected_output in zip(original, expected_outputs):
# No defaults, values required
self._test({
"serialized":
ops.convert_to_tensor(
m.SerializeToString()
from colors import Colors
import copy
# token attributes (indices into each token entry)
token = 0
linha = 1
coluna = 2
tipo = 3
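# Illustrative note (an assumption, not from the original source): each entry in the incoming
# token list is expected to look like [lexeme, line, column, kind], e.g.
# ['x1', 3, 5, 'Identificador'], so token[token] is the lexeme and token[tipo] its lexical class.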
class Semantico:
token = list()
tokens = list()
linhaToken = 0
pilha = list()
escopo = list()
pilha_execucao = list()
semente = 0
tabela = list()
msg = ''
sinaliza_tipo = False
sinaliza_inserir = False
sinaliza_procedimento = None
sequencia_parametros = []
ultimo_token_buscado = []
tabela_hipo = []
resultado = False
def __init__(self, tokens_de_entrada):
self.tokens = tokens_de_entrada
self.escopo.append(['0', 'livre'])
if (self.programa()):
self.resultado = True
print(Colors().sucess, "\n########SEMÂNTICO COM SUCESSO!!!##########\n", Colors().reset)
print("Tabela de simbolos:")
for x in range(len(self.tabela)):
msg = str(x) + ": ['" + str(self.tabela[x][0]) + "', " + str(self.tabela[x][1][-1]) + ", '" + str(self.tabela[x][2]) + "']"
print(msg)
else:
print("\nPilha de erros:")
for x in range(len(self.pilha)):
print(x, self.pilha[x])
print(Colors().danger, "\n\n########ERRO NO SEMÂNTICO########")
print(self.pilha[-1])
print(self.msg)
def programa(self):
self.nextToken()
if (self.token[token] == "program"):
self.semente += 1
self.escopo.append([self.semente, 'estrito'])
self.nextToken()
if (self.token[tipo] == "Identificador"):
#print('Iniciado inserir em: program > ident')
self.pilha += ['Erro ao inserir o token: ' + str(self.token)]
if(self.inserir([self.token[token], self.escopo, 'program', ''], True)):
self.pilha.pop()
#print('Terminado de inserir em: program > ident\n')
self.nextToken()
if (self.corpo()):
self.nextToken()
if (self.token[token] == '.'):
self.escopo.pop()
return True
return False
# symbol, scope ([seed, kind]), type, value
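# Added description (not in the original source): buscar() checks whether `linha`
# ([symbol, scope-stack, type, value]) was already declared. It gathers same-named entries from
# the execution stack (skipping the first occurrence) and the symbol table, then compares scopes;
# while the innermost scope kind is 'livre' it pops one scope level per iteration so enclosing
# scopes are also searched, whereas an 'estrito' scope is only compared against the exact scope.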
def buscar(self, linha):
#print(' abrindo função buscar')
#print(' buscando:', linha)
lista = list()
flag = False
for x in self.pilha_execucao:
if(x[0] == linha[0]):
if(flag):
#print(' encontrado correspondência em for da pilha')
lista.append(x)
else:
flag = True
for x in self.tabela:
if(x[0] == linha[0]):
#print(' encontrado correspondência em for da tabela')
lista.append(x)
#print(' itens para veificar:')
#for x in lista:
#print(' ', x)
c_linha = copy.deepcopy(linha)
# iterate from the scope-stack size minus 1 down to the root
for x in range(len(c_linha[1]) - 1):
# kind of the innermost scope entry of c_linha
if(c_linha[1][-1][1] == 'livre'):
for y in lista:
if(c_linha[1] == y[1]):
#print(Colors().blue, ' achou', Colors().reset, c_linha, 'já existe na tabela de simbolos!')
self.msg = 'Token ' + c_linha[0] + ' já declarada!'
self.ultimo_token_buscado = y
#print(' ultimo token buscado:', self.ultimo_token_buscado)
if(not self.sinaliza_tipo):
self.sinaliza_tipo = y
#print(' alterando sinaliza tipo para:', self.sinaliza_tipo)
#print(' fechando função buscar e retornando true')
return True
#print(' antes do pop:', c_linha[1])
c_linha[1].pop()
#print(' depois do pop:', c_linha[1])
else:
for y in lista:
if (c_linha[1] == y[1]):
#print(Colors().blue, ' achou', Colors().reset, c_linha, 'já existe na tabela de simbolos!')
self.msg = 'Token ' + c_linha[0] + ' já declarada!'
self.ultimo_token_buscado = y
#print(' ultimo token buscado:', self.ultimo_token_buscado)
if (not self.sinaliza_tipo):
self.sinaliza_tipo = y
#print(' alterando sinaliza tipo para:', self.sinaliza_tipo)
#print(' fechando função buscar e retornando true')
return True
#print(' id ainda', Colors().blue, 'nao existe!', Colors().reset)
#print(' fechando função buscar e retornando false')
return False
def buscar2(self, nome):
for x in self.tabela:
if(x[0] == nome):
return x
return False
def inserir(self, linha, tabela):
#print(' abrindo função inserir')
if(tabela):
#print(' destino: tabela')
if(self.buscar(linha)):
#print(Colors().danger, ' erro', Colors().reset, 'id', linha, 'já existe na', Colors().danger, 'tabela de simbolos', Colors().reset)
#print(' fechando função inserir e retornando falso')
return False
else:
self.tabela.append(copy.deepcopy(linha))
#print(Colors().sucess, ' sucesso', Colors().reset, linha, 'inserido na', Colors().sucess, 'tabela de simbolos', Colors().reset)
#print(' ultimo item tabela:', self.tabela[-1])
# check whether a procedure is currently flagged (sinaliza_procedimento)
if(self.sinaliza_procedimento is not None):
#print('Sinaliza procedimento retornou Verdadeiro')
#look up the flagged procedure in the symbol table
x = self.buscar2(self.sinaliza_procedimento)
#fetch the flagged procedure's parameter types from the symbol table and append the new one
#update the symbol table entry
x[2] += ',' + linha[2]
#print('Atualizado tabela de simbolos para', x)
#print(' fechando função inserir e retornando true')
return True
else:
#print(' destino:pilha de execução')
if (self.buscar(linha)):
#print(Colors().danger, 'erro', Colors().reset, 'id', linha, 'já existe na pilha de exucução')
#print(' fechando função inserir e retornando falso')
return False
else:
self.pilha_execucao.append(copy.deepcopy(linha))
#print(Colors().sucess, ' sucesso', Colors().reset, linha, 'inserido na', Colors().blue, 'pilha de execução', Colors().reset)
#print(' ultimo item pilha:', self.pilha_execucao[-1])
#print(' fechando função inserir e retornando true')
return True
def aplicarTipo(self, tipo):
for x in self.pilha_execucao:
#print('Iniciado inserir em: aplicarTipo')
x[2] = tipo
if(not self.inserir(x, True)):
self.pilha += ['erro ao inserir os tokens da linha: ' + str(self.token[linha])]
self.msg += '\nerro em aplicarTipo'
return False
#print('Terminado inserir em: aplicarTipo\n')
self.pilha_execucao.clear()
return True
def comparar(self, tipo):
#print(' abrindo função comparar')
#print(' ultimo buscado:', tipo)
#print(' sinalizatipo:', self.sinaliza_tipo)
if(not self.sinaliza_tipo):
self.sinaliza_tipo = copy.deepcopy(tipo)
#print(' fechando função comparar, add sinaliza_tipo e retornando true')
return True
elif(tipo[2] != self.sinaliza_tipo[2]):
#print(' fechando função comparar e retornando falso')
self.msg = 'operação com tipos diferentes'
return False
#print(' fechando função comparar e retornando true')
return True
def nextToken(self):
self.token = self.tokens[self.linhaToken]
self.linhaToken += 1
if (self.token[tipo] == "Comentário"):
#print("encontrado", Colors().blue, "Comentário", Colors().reset)
self.nextToken()
def prevToken(self):
self.linhaToken -= 1
self.token = self.tokens[self.linhaToken]
def corpo(self):
if (self.dc()):
self.nextToken()
if (self.token[token] == "begin"):
self.nextToken()
self.semente += 1
self.escopo.append([self.semente, 'livre'])
if (self.comandos()):
self.escopo.pop()
self.nextToken()
if (self.token[token] == "end"):
return True
return False
def dc(self):
dc_v = self.dc_v()
if (dc_v or dc_v == 'Deu ruim'):
if (dc_v == 'Deu ruim'):
return False
else:
self.nextToken()
if (self.mais_dc()):
return True
return False
elif (not dc_v):
dc_p = self.dc_p()
if (dc_p or dc_p == 'Deu ruim'):
if (dc_p == 'Deu ruim'):
return False
else:
self.nextToken()
if (self.mais_dc()):
return True
return False
elif (' '):
self.prevToken()
return True
def mais_dc(self):
if (self.token[token] == ';'):
self.nextToken()
if (self.dc()):
return True
return False
elif (' '):
self.prevToken()
return True
def dc_v(self):
if (self.token[token] == "var"):
self.pilha_execucao.clear()
self.nextToken()
self.sinaliza_inserir = True
if (self.variaveis()):
self.sinaliza_inserir = False
self.nextToken()
if (self.token[token] == ':'):
self.nextToken()
if (self.tipo_var()):
return True
return 'Deu ruim'
return False
def tipo_var(self):
if (self.token[token] == "real"):
if(self.aplicarTipo('real')):
return True
elif (self.token[token] == "integer"):
if(self.aplicarTipo('integer')):
return True
return False
def variaveis(self):
if (self.token[tipo] == "Identificador"):
if (self.sinaliza_inserir):
#print('Iniciado inserir em: variaveis > ident')
self.pilha += ['erro ao inserir o token: ' + str(self.token)]
if(self.inserir([self.token[token], self.escopo, 'ident', ''], False)):
self.pilha.pop()
#print('Terminado inserir em: variaveis > ident\n')
self.nextToken()
if (self.mais_var()):
return True
elif(self.buscar([self.token[token], self.escopo, 'ident', ''])):
##par.retorno.append([self.token[token], self.escopo, 'ident', '', ''])
self.nextToken()
if (self.mais_var()):
return True
else:
exit('\nErro na linha ' + str(self.token[linha]) + '. Variavel ' + str(self.token[token]) + ' não existe.')
return False
def mais_var(self):
if (self.token[token] == ','):
self.nextToken()
if (self.variaveis()):
return True
return False
elif (' '):
self.prevToken()
return True
def dc_p(self):
if (self.token[token] == "procedure"):
self.nextToken()
if (self.token[tipo] == "Identificador"):
#print('Iniciado inserir em procedure > ident')
self.pilha += ['erro ao inserir o token:' + str(self.token)]
if(self.inserir([self.token[token], self.escopo, 'procedure', ''], True)):
self.pilha.pop()
#print('Finalizado inserir em procedure > ident\n')
self.semente += 1
self.escopo.append([self.semente, 'estrito'])
self.sinaliza_procedimento = self.token[token]
self.nextToken()
if (self.parametros()):
self.nextToken()
self.sinaliza_procedimento = None
if (self.corpo()):
self.escopo.pop()
return True
return 'Deu ruim'
return False
def parametros(self):
if (self.token[token] == '('):
self.nextToken()
if (self.lista_par()):
self.nextToken()
if (self.token[token] == ')'):
return True
return False
elif (' '):
self.prevToken()
return True
def lista_par(self):
self.sinaliza_inserir = True
if (self.variaveis()):
self.sinaliza_inserir = False
self.nextToken()
if (self.token[token] == ':'):
self.nextToken()
if (self.tipo_var()):
self.nextToken()
if (self.mais_par()):
return True
return False
def mais_par(self):
if (self.token[token] == ';'):
self.nextToken()
if (self.lista_par()):
return True
return False
elif (' '):
self.prevToken()
return True
def corpo_p(self):
if (self.dc_loc()):
self.nextToken()
if (self.token[token] == "begin"):
self.nextToken()
if (self.comandos()):
self.nextToken()
if (self.token[token] == "end"):
return True
return False
def dc_loc(self):
if (self.dc_v()):
self.nextToken()
if (self.mais_dcloc()):
return True
return False
elif (' '):
return True
def mais_dcloc(self):
if (self.token[token] == ';'):
self.nextToken()
if (self.dc_loc()):
return True
return False
elif (' '):
self.prevToken()
return True
def lista_arg(self):
if (self.token[token] == '('):
self.nextToken()
if (self.ultimo_token_buscado[2].startswith('procedure')):
self.sequencia_parametros = self.ultimo_token_buscado[2].split(',')
self.sequencia_parametros.pop(0)
if (self.argumentos()):
self.nextToken()
self.sequencia_parametros.clear()  # call clear(); the bare attribute reference was a no-op
if (self.token[token] == ')'):
return True
return False
elif (' '):
self.prevToken()
return True
def argumentos(self):
if(self.sequencia_parametros != []):
if (self.token[tipo] == 'Identificador'):
#print('Iniciado buscar em argumentos > ident')
self.pilha += ['erro ao buscar o token: ' + str(self.token)]
self.msg = 'Token ' + str(self.token[token]) + ' ainda não foi declarado!'
if(self.buscar([self.token[token], self.escopo, 'ident', ''])):
self.pilha.pop()
#print('Terminado buscar em argumentos > ident\n')
if(self.ultimo_token_buscado[2] == self.sequencia_parametros.pop(0)):
#print('ok,', self.ultimo_token_buscado[2], 'na sequencia certa')
self.nextToken()
if (self.mais_ident()):
return True
exit('Erro na linha ' + str(self.token[1]) + ' Parametro do procedimento | |
import os
import sys
import warnings
import importlib
import inspect
import os.path as osp
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import Sequence
from tensorflow.python.keras import callbacks as callbacks_module
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.callbacks import History
from torch.utils.data import DataLoader, Dataset
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.data.io import makedirs_from_filepath
from graphgallery.gallery import Model
from graphgallery.utils import Progbar
# TensorFlow 2.1.x
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
# This is caused by `tf.gather` and it will be solved in future tensorflow version.
warnings.filterwarnings(
'ignore',
message='.*Converting sparse IndexedSlices to a dense Tensor of unknown shape.*')
# TensorFlow 2.4.0
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=...) to a dense Tensor of unknown shape.
# This may consume a large amount of memory.
warnings.filterwarnings(
'ignore', message='.*to a dense Tensor of unknown shape.*')
def format_doc(d):
msg = ""
for i, (k, v) in enumerate(d.items()):
if v != "UNSPECIDIED":
msg += f"({i + 1}) `{k}`, Default is `{v}` \n"
else:
msg += f"({i + 1}) `{k}`, UNSPECIDIED argument\n"
return msg
def doc_dict(func):
ArgSpec = inspect.getfullargspec(func)
args = ArgSpec.args if ArgSpec.args else []
args = args[1:] if args and args[0] == "self" else args  # guard against an empty argument list
defaults = ArgSpec.defaults if ArgSpec.defaults else []
delta_l = len(args) - len(defaults)
defaults = ["UNSPECIDIED"] * delta_l + list(defaults)
d = dict(zip(args, defaults))
return d
def make_docs(*func):
d = {}
for f in func:
d.update(doc_dict(f))
return format_doc(d)
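# Hedged illustrative sketch (not part of the original module; the function and
# argument names below are hypothetical): shows how `make_docs` turns keyword
# arguments and their defaults into a numbered description string.
def _example_make_docs():
    def model_step(self, hids=16, acts="relu", dropout=0.5):
        pass
    # returns lines like "(1) `hids`, Default is `16`"
    return make_docs(model_step)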
def unravel_batch(batch):
inputs = labels = out_index = None
if isinstance(batch, (list, tuple)):
inputs = batch[0]
labels = batch[1]
if len(batch) > 2:
out_index = batch[-1]
else:
inputs = batch
if isinstance(labels, (list, tuple)) and len(labels) == 1:
labels = labels[0]
if isinstance(out_index, (list, tuple)) and len(out_index) == 1:
out_index = out_index[0]
return inputs, labels, out_index
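# Hedged usage sketch (added for illustration; the toy values are hypothetical):
# `unravel_batch` normalizes either a bare batch or an (inputs, labels[, out_index])
# tuple into three values, unwrapping single-element lists along the way.
def _example_unravel_batch():
    inputs, labels, out_index = unravel_batch(("x_batch", ["y_batch"], ["idx"]))
    assert labels == "y_batch" and out_index == "idx"
    inputs, labels, out_index = unravel_batch(np.arange(3))
    assert labels is None and out_index is None
    return inputs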
class Trainer(Model):
def setup_cfg(self):
"""load the default config function `default_cfg_setup` for the corresponding task.
Raises
------
RuntimeError
if the default config function `default_cfg_setup` is not found in the file `graphgallery.gallery.[task].default`
"""
# nodeclas/linkpred/...
task_module = self.__module__.split('.')[2]
# graphgallery.gallery
gallery_module = '.'.join(__name__.split('.')[:-1])
try:
default_setup = importlib.import_module(f".{task_module}.default", gallery_module)
except ModuleNotFoundError:
raise RuntimeError(f"default setup function `{gallery_module}.{task_module}.default.default_cfg_setup` not found!")
default_setup.default_cfg_setup(self.cfg)
@np.deprecate(old_name="make_data",
message=("the method `trainer.make_data` is currently deprecated from 0.9.0,"
" please use `trainer.setup_graph` instead."))
def make_data(self, *args, **kwargs):
return self.setup_graph(*args, **kwargs)
def setup_graph(self, graph, graph_transform=None, device=None, **kwargs):
"""This method is used for process your inputs, which accepts
only keyword arguments in your defined method 'data_step'.
This method will process the inputs, and transform them into tensors.
Commonly used keyword arguments:
--------------------------------
graph: graphgallery graph classes.
graph_transform: string, Callable function,
or a tuple with function and dict arguments.
transform for the entire graph, it is used first.
device: device for preparing data, if None, it defaults to `self.device`
adj_transform: string, Callable function,
or a tuple with function and dict arguments.
transform for adjacency matrix.
attr_transform: string, Callable function,
or a tuple with function and dict arguments.
transform for attribute matrix.
other arguments (if any) will be passed into the method 'data_step'.
"""
self.empty_cache()
model = self.model
if model is not None and hasattr(model, 'empty_cache'):
model.empty_cache()
self.graph = gf.get(graph_transform)(graph)
cfg = self.cfg.data
if device is not None:
self.data_device = gf.device(device, self.backend)
else:
self.data_device = self.device
cfg.device = device
_, kwargs = gf.wrapper(self.data_step)(**kwargs)
kwargs['graph_transform'] = graph_transform
cfg.merge_from_dict(kwargs)
for k, v in kwargs.items():
if k.endswith("transform"):
setattr(self.transform, k, gf.get(v))
return self
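    # Hedged usage note (illustrative only; `graph` and the transform name are
    # assumptions, not guaranteed graphgallery values):
    #   trainer.setup_graph(graph, adj_transform="normalize_adj")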
def data_step(self, *args, **kwargs):
"""Implement you data processing function here"""
raise NotImplementedError
def build(self, **kwargs):
"""This method is used for build your model, which
accepts only keyword arguments in your defined method 'model_step'.
Note:
-----
This method should be called after `process`.
Commonly used keyword arguments:
--------------------------------
hids: int or a list of them,
hidden units for each hidden layer.
acts: string or a list of them,
activation functions for each layer.
dropout: float scalar,
dropout used in the model.
lr: float scalar,
learning rate used for the model.
weight_decay: float scalar,
weight decay used for the model weights.
bias: bool,
whether to use bias in each layer.
use_tfn: bool,
this argument is only used for the TensorFlow backend; if `True`, it will decorate
the model training and testing with `tf.function` (see `graphgallery.nn.modes.TFKeras`).
By default it is `True`, which can accelerate training and inference, but it may cause
several errors.
other arguments (if any) will be passed into your method 'model_step'.
"""
if self._graph is None:
raise RuntimeError("Please call 'trainer.setup_graph(graph)' first.")
use_tfn = kwargs.get("use_tfn", True)
if self.backend == "tensorflow":
with tf.device(self.device):
self.model, kwargs = gf.wrapper(self.model_step)(**kwargs)
if use_tfn:
self.model.use_tfn()
else:
kwargs.pop("use_tfn", None)
model, kwargs = gf.wrapper(self.model_step)(**kwargs)
self.model = model.to(self.device)
self.cfg.model.merge_from_dict(kwargs)
return self
def model_step(self, *args, **kwargs):
"""Implement you model building function here"""
raise NotImplementedError
def fit(self, train_data, val_data=None, **kwargs):
cache = self.cache
cfg = self.cfg.fit
cfg.merge_from_dict(kwargs)
ckpt_cfg = cfg.ModelCheckpoint
es_cfg = cfg.EarlyStopping
pb_cfg = cfg.Progbar
log_cfg = cfg.Logger
if log_cfg.enabled:
log_cfg.name = log_cfg.name or self.name
logger = gg.utils.setup_logger(output=log_cfg.filepath, name=log_cfg.name)
model = self.model
if model is None:
raise RuntimeError(
'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
)
if not isinstance(train_data, (Sequence, DataLoader, Dataset)):
train_data = self.train_loader(train_data)
if cfg.cache_train_data:
cache.train_data = train_data
validation = val_data is not None
if validation:
if not isinstance(val_data, (Sequence, DataLoader, Dataset)):
val_data = self.test_loader(val_data)
if cfg.cache_val_data:
cache.val_data = val_data
# Setup callbacks
callbacks = callbacks_module.CallbackList()
history = History()
callbacks.append(history)
cfg, callbacks = setup_callbacks(cfg, callbacks, validation)
callbacks.set_model(model)
self.callbacks = callbacks
model.stop_training = False
verbose = cfg.verbose
assert not (verbose and log_cfg.enabled), "Progbar and Logger cannot be used together! You must set `verbose=0` when Logger is enabled."
if verbose:
if verbose <= 2:
progbar = Progbar(target=cfg.epochs,
width=pb_cfg.width,
verbose=verbose)
print("Training...")
elif log_cfg.enabled:
logger.info("Training...")
logs = gf.BunchDict()
callbacks.on_train_begin()
# for some initialization
if hasattr(model, 'on_train_begin'):
model.on_train_begin()
try:
for epoch in range(cfg.epochs):
if verbose > 2:
progbar = Progbar(target=len(train_data),
width=pb_cfg.width,
verbose=verbose - 2)
callbacks.on_epoch_begin(epoch)
train_logs = self.train_step(train_data)
if hasattr(train_data, 'on_epoch_end'):
train_data.on_epoch_end()
logs.update({k: to_item(v) for k, v in train_logs.items()})
if validation:
valid_logs = self.test_step(val_data)
logs.update({("val_" + k): to_item(v) for k, v in valid_logs.items()})
if hasattr(val_data, 'on_epoch_end'):
val_data.on_epoch_end()
callbacks.on_train_batch_end(len(train_data), logs)
callbacks.on_epoch_end(epoch, logs)
if verbose > 2:
print(f"Epoch {epoch+1}/{cfg.epochs}")
progbar.update(len(train_data), logs.items())
elif verbose:
progbar.update(epoch + 1, logs.items())
elif log_cfg.enabled:
logger.info(f"Epoch {epoch+1}/{cfg.epochs}\n{gg.utils.create_table(logs)}")
if model.stop_training:
if log_cfg.enabled:
logger.info(f"Early Stopping at Epoch {epoch}")
else:
print(f"Early Stopping at Epoch {epoch}", file=sys.stderr)
break
callbacks.on_train_end()
if ckpt_cfg.enabled:
if ckpt_cfg.save_weights_only:
model.load_weights(ckpt_cfg.path)
else:
self.model = model.load(ckpt_cfg.path)
finally:
# to avoid unexpected termination of the model
if ckpt_cfg.enabled and ckpt_cfg.remove_weights:
self.remove_weights()
return history
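    # Hedged usage note (illustrative only; the variable names and argument values
    # are assumptions): a typical call sequence for this Trainer is
    #   trainer.setup_graph(graph).build(hids=[16], acts='relu', lr=0.01)
    #   history = trainer.fit(train_nodes, val_nodes, epochs=100, verbose=1)
    #   results = trainer.evaluate(test_nodes)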
def evaluate(self, test_data, **kwargs):
if not self.model:
raise RuntimeError(
'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
)
cache = self.cache
cfg = self.cfg.evaluate
cfg.merge_from_dict(kwargs)
if not isinstance(test_data, (Sequence, DataLoader, Dataset)):
test_data = self.test_loader(test_data)
if cfg.cache_test_data:
cache.test_data = test_data
if cfg.verbose:
print("Testing...")
progbar = Progbar(target=len(test_data),
width=cfg.Progbar.width,
verbose=cfg.verbose)
logs = gf.BunchDict(**self.test_step(test_data))
logs.update({k: to_item(v) for k, v in logs.items()})
progbar.update(len(test_data), logs.items())
return logs
def train_step(self, sequence):
model = self.model
model.reset_metrics()
results = None
for epoch, batch in enumerate(sequence):
self.callbacks.on_train_batch_begin(epoch)
inputs, labels, out_index = unravel_batch(batch)
results = model.train_step_on_batch(x=inputs,
y=labels,
out_index=out_index,
device=self.device)
return results
def test_step(self, sequence):
model = self.model
model.reset_metrics()
results = None
for batch in sequence:
inputs, labels, out_index = unravel_batch(batch)
results = model.test_step_on_batch(x=inputs,
y=labels,
out_index=out_index,
device=self.device)
return results
def predict(self, predict_data=None, transform=None):
if not self.model:
raise RuntimeError(
'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
)
cache = self.cache
cfg = self.cfg.predict
cfg.transform = transform
if not isinstance(predict_data, (Sequence, DataLoader, Dataset)):
predict_data = self.predict_loader(predict_data)
if cfg.cache_predict_data:
cache.predict_data = predict_data
logits = self.predict_step(predict_data)
self.transform.logit_transform = T = gf.get(transform)
logits = T(logits)
return logits.squeeze()
def predict_step(self, sequence):
logits = []
model = self.model
for batch in sequence:
inputs, labels, out_index = unravel_batch(batch)
logit = model.predict_step_on_batch(x=inputs,
out_index=out_index,
device=self.device)
logits.append(logit)
return np.vstack(logits)
def train_loader(self, inputs, **kwargs):
raise NotImplementedError
def test_loader(self, inputs, **kwargs):
return self.train_loader(inputs, **kwargs)
def predict_loader(self, inputs, **kwargs):
return self.test_loader(inputs, **kwargs)
def _test_predict(self, index):
logit = self.predict(index)
predict_class = logit.argmax(1)
labels = self.graph.node_label[index]
return (predict_class == labels).mean()
def reset_weights(self):
# TODO: add pytorch support
"""reset the model to the first time."""
model = self.model
if self.backup is None:
raise RuntimeError(
"You must store the `backup` before `reset_weights`."
"`backup` | |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from chameleon.decorators import terms_required
from django.contrib import messages
from django.http import (
Http404,
HttpResponseForbidden,
HttpResponse,
HttpResponseRedirect,
HttpResponseNotAllowed,
JsonResponse,
)
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django import forms
from datetime import datetime
from django.conf import settings
from .models import Project, ProjectExtras
from projects.serializer import ProjectExtrasJSONSerializer
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from .forms import (
ProjectCreateForm,
ProjectAddUserForm,
AllocationCreateForm,
EditNicknameForm,
AddBibtexPublicationForm,
)
from django.db import IntegrityError
import re
import logging
import json
from keystoneclient.v3 import client as ks_client
from keystoneauth1 import adapter
from django.conf import settings
import uuid
import sys
from chameleon.keystone_auth import admin_ks_client, sync_projects, get_user
from util.project_allocation_mapper import ProjectAllocationMapper
logger = logging.getLogger("projects")
def project_pi_or_admin_or_superuser(user, project):
if user.is_superuser:
return True
if user.groups.filter(name="Allocation Admin").count() == 1:
return True
if user.username == project.pi.username:
return True
return False
def project_member_or_admin_or_superuser(user, project, project_user):
if project_pi_or_admin_or_superuser(user, project):
return True
for pu in project_user:
if user.username == pu.username:
return True
return False
@login_required
def user_projects(request):
context = {}
username = request.user.username
mapper = ProjectAllocationMapper(request)
user = mapper.get_user(username)
context["is_pi_eligible"] = user["piEligibility"].lower() == "eligible"
context["username"] = username
context["projects"] = mapper.get_user_projects(username, to_pytas_model=True)
return render(request, "projects/user_projects.html", context)
@login_required
def view_project(request, project_id):
mapper = ProjectAllocationMapper(request)
try:
project = mapper.get_project(project_id)
if project.source != "Chameleon":
raise Http404("The requested project does not exist!")
except Exception as e:
logger.error(e)
raise Http404("The requested project does not exist!")
form = ProjectAddUserForm()
nickname_form = EditNicknameForm()
pubs_form = AddBibtexPublicationForm()
if request.POST and project_pi_or_admin_or_superuser(request.user, project):
form = ProjectAddUserForm()
if "add_user" in request.POST:
form = ProjectAddUserForm(request.POST)
if form.is_valid():
try:
add_username = form.cleaned_data["username"]
if mapper.add_user_to_project(project, add_username):
sync_project_memberships(request, add_username)
messages.success(
request, f'User "{add_username}" added to project!'
)
form = ProjectAddUserForm()
except Exception as e:
logger.exception("Failed adding user")
messages.error(
request,
(
"Unable to add user. Confirm that the username is "
"correct and corresponds to a current Chameleon user."
),
)
else:
messages.error(
request,
(
"There were errors processing your request. "
"Please see below for details."
),
)
elif "del_user" in request.POST:
try:
del_username = request.POST["username"]
# Ensure that it's not possible to remove the PI
if del_username == project.pi.username:
raise PermissionDenied(
"Removing the PI from the project is not allowed."
)
if mapper.remove_user_from_project(project, del_username):
sync_project_memberships(request, del_username)
messages.success(
request, 'User "%s" removed from project' % del_username
)
except PermissionDenied as exc:
messages.error(request, exc)
except:
logger.exception("Failed removing user")
messages.error(
request,
"An unexpected error occurred while attempting "
"to remove this user. Please try again",
)
elif "nickname" in request.POST:
nickname_form = edit_nickname(request, project_id)
users = mapper.get_project_members(project)
if not project_member_or_admin_or_superuser(request.user, project, users):
raise PermissionDenied
for a in project.allocations:
if a.start and isinstance(a.start, str):
a.start = datetime.strptime(a.start, "%Y-%m-%dT%H:%M:%SZ")
if a.dateRequested:
if isinstance(a.dateRequested, str):
a.dateRequested = datetime.strptime(
a.dateRequested, "%Y-%m-%dT%H:%M:%SZ"
)
if a.dateReviewed:
if isinstance(a.dateReviewed, str):
a.dateReviewed = datetime.strptime(a.dateReviewed, "%Y-%m-%dT%H:%M:%SZ")
if a.end:
if isinstance(a.end, str):
a.end = datetime.strptime(a.end, "%Y-%m-%dT%H:%M:%SZ")
user_mashup = []
for u in users:
user = {
"username": u.username,
"role": u.role,
}
try:
portal_user = User.objects.get(username=u.username)
user["email"] = portal_user.email
user["first_name"] = portal_user.first_name
user["last_name"] = portal_user.last_name
except User.DoesNotExist:
logger.info("user: " + u.username + " not found")
user_mashup.append(user)
return render(
request,
"projects/view_project.html",
{
"project": project,
"project_nickname": project.nickname,
"users": user_mashup,
"is_pi": request.user.username == project.pi.username,
"form": form,
"nickname_form": nickname_form,
"pubs_form": pubs_form,
},
)
def set_ks_project_nickname(chargeCode, nickname):
for region in list(settings.OPENSTACK_AUTH_REGIONS.keys()):
ks_admin = admin_ks_client(region=region)
project_list = ks_admin.projects.list(domain=ks_admin.user_domain_id)
project = [
this
for this in project_list
if getattr(this, "charge_code", None) == chargeCode
]
logger.info(
"Assigning nickname {0} to project with charge code {1} at {2}".format(
nickname, chargeCode, region
)
)
if project and project[0]:
project = project[0]
ks_admin.projects.update(project, name=nickname)
logger.info(
"Successfully assigned nickname {0} to project with charge code {1} at {2}".format(
nickname, chargeCode, region
)
)
def sync_project_memberships(request, username):
"""Re-sync a user's Keystone project memberships.
This calls utils.auth.keystone_auth.sync_projects under the hood, which
will dynamically create missing projects as well.
Args:
request (Request): the parent request; used for region detection.
username (str): the username to sync memberships for.
Return:
List[keystone.Project]: a list of Keystone projects the user is a
member of.
"""
mapper = ProjectAllocationMapper(request)
try:
ks_admin = admin_ks_client(request=request)
ks_user = get_user(ks_admin, username)
if not ks_user:
logger.error(
(
"Could not fetch Keystone user for {}, skipping membership syncing".format(
username
)
)
)
return
active_projects = mapper.get_user_projects(
username, alloc_status=["Active"], to_pytas_model=True
)
return sync_projects(ks_admin, ks_user, active_projects)
except Exception as e:
logger.error("Could not sync project memberships for %s: %s", username, e)
return []
@login_required
@terms_required("project-terms")
def create_allocation(request, project_id, allocation_id=-1):
mapper = ProjectAllocationMapper(request)
user = mapper.get_user(request.user.username)
if user["piEligibility"].lower() != "eligible":
messages.error(
request,
"Only PI Eligible users can request allocations. If you would "
"like to request PI Eligibility, please "
'<a href="/user/profile/edit/">submit a PI Eligibility '
"request</a>.",
)
return HttpResponseRedirect(reverse("projects:user_projects"))
project = mapper.get_project(project_id)
allocation = None
allocation_id = int(allocation_id)
if allocation_id > 0:
for a in project.allocations:
if a.id == allocation_id:
allocation = a
# goofiness that we should clean up later; requires data cleansing
abstract = project.description
if "--- Supplemental details ---" in abstract:
additional = abstract.split("\n\n--- Supplemental details ---\n\n")
abstract = additional[0]
additional = additional[1].split("\n\n--- Funding source(s) ---\n\n")
justification = additional[0]
if len(additional) > 1:
funding_source = additional[1]
else:
funding_source = ""
elif allocation:
justification = allocation.justification
if "--- Funding source(s) ---" in justification:
parts = justification.split("\n\n--- Funding source(s) ---\n\n")
justification = parts[0]
funding_source = parts[1]
else:
funding_source = ""
else:
justification = ""
funding_source = ""
if request.POST:
form = AllocationCreateForm(
request.POST,
initial={
"description": abstract,
"supplemental_details": justification,
"funding_source": funding_source,
},
)
if form.is_valid():
allocation = form.cleaned_data.copy()
allocation["computeRequested"] = 20000
# Also update the project
project.description = allocation.pop("description", None)
supplemental_details = allocation.pop("supplemental_details", None)
logger.error(supplemental_details)
funding_source = allocation.pop("funding_source", None)
# if supplemental_details == None:
# raise forms.ValidationError("Justifcation is required")
# This is required
if not supplemental_details:
supplemental_details = "(none)"
logger.error(supplemental_details)
if funding_source:
allocation[
"justification"
] = "%s\n\n--- Funding source(s) ---\n\n%s" % (
supplemental_details,
funding_source,
)
else:
allocation["justification"] = supplemental_details
allocation["projectId"] = project_id
allocation["requestorId"] = mapper.get_portal_user_id(request.user.username)
allocation["resourceId"] = "39"
if allocation_id > 0:
allocation["id"] = allocation_id
try:
logger.info(
"Submitting allocation request for project %s: %s"
% (project.id, allocation)
)
updated_project = mapper.save_project(project.as_dict())
mapper.save_allocation(
allocation, project.chargeCode, request.get_host()
)
messages.success(request, "Your allocation request has been submitted!")
return HttpResponseRedirect(
reverse("projects:view_project", args=[updated_project["id"]])
)
except:
logger.exception("Error creating allocation")
form.add_error(
"__all__", "An unexpected error occurred. Please try again"
)
else:
form.add_error(
"__all__",
"There were errors processing your request. "
"Please see below for details.",
)
else:
form = AllocationCreateForm(
initial={
"description": abstract,
"supplemental_details": justification,
"funding_source": funding_source,
}
)
context = {
"form": form,
"project": project,
"alloc_id": allocation_id,
"alloc": allocation,
}
return render(request, "projects/create_allocation.html", context)
@login_required
@terms_required("project-terms")
def create_project(request):
mapper = ProjectAllocationMapper(request)
form_args = {"request": request}
user = mapper.get_user(request.user.username)
if user["piEligibility"].lower() != "eligible":
messages.error(
request,
"Only PI Eligible users can create new projects. "
"If you would like to request PI Eligibility, please "
'<a href="/user/profile/edit/">submit a PI Eligibility '
"request</a>.",
)
return HttpResponseRedirect(reverse("projects:user_projects"))
if request.POST:
form = ProjectCreateForm(request.POST, **form_args)
if form.is_valid():
# title, description, typeId, fieldId
project = form.cleaned_data.copy()
# let's check that any provided nickname is unique
project["nickname"] = project["nickname"].strip()
nickname_valid = (
project["nickname"]
and ProjectExtras.objects.filter(nickname=project["nickname"]).count()
< 1
and Project.objects.filter(nickname=project["nickname"]).count() < 1
)
if not nickname_valid:
form.add_error("__all__", "Project nickname unavailable")
return render(request, "projects/create_project.html", {"form": form})
project.pop("accept_project_terms", None)
# pi
pi_user_id = mapper.get_portal_user_id(request.user.username)
project["piId"] = pi_user_id
# allocations
allocation = {
"resourceId": 39,
"requestorId": pi_user_id,
"computeRequested": 20000,
}
supplemental_details = project.pop("supplemental_details", None)
funding_source = project.pop("funding_source", None)
# if supplemental_details == None:
# raise forms.ValidationError("Justifcation is required")
if not supplemental_details:
supplemental_details = "(none)"
if funding_source:
allocation[
"justification"
] = "%s\n\n--- Funding source(s) ---\n\n%s" % (
supplemental_details,
funding_source,
)
else:
allocation["justification"] = supplemental_details
project["allocations"] = [allocation]
# startup
project["typeId"] = 2
# source
project["source"] = "Chameleon"
try:
created_project = mapper.save_project(project, request.get_host())
logger.info("newly created project: " + json.dumps(created_project))
messages.success(request, "Your project has been created!")
return HttpResponseRedirect(
reverse("projects:view_project", args=[created_project["id"]])
)
except:
logger.exception("Error creating project")
form.add_error(
"__all__", "An unexpected error occurred. Please try again"
)
else:
form.add_error(
"__all__",
"There were errors processing your request. "
"Please see below for details.",
)
else:
form = ProjectCreateForm(**form_args)
return render(request, "projects/create_project.html", {"form": form})
@login_required
def edit_project(request):
context = {}
return render(request, "projects/edit_project.html", context)
@require_POST
def edit_nickname(request, project_id):
mapper = ProjectAllocationMapper(request)
project = mapper.get_project(project_id)
if not project_pi_or_admin_or_superuser(request.user, project):
messages.error(request, "Only the project PI can update nickname.")
return EditNicknameForm()
form = EditNicknameForm(request.POST)
if form.is_valid(request):
# try | |
-1),
(assign, "$g_presentation_obj_escape_menu_14", -1),
(assign, "$g_presentation_obj_escape_menu_15", -1),
(assign, "$g_presentation_obj_escape_menu_16", -1),
(assign, "$g_presentation_obj_escape_menu_17", -1),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 40),#80 move down
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1100),#1000 size
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_escape_menu_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 75),#75
(overlay_set_position, "$g_presentation_obj_escape_menu_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 580),#550
(overlay_set_area_size, "$g_presentation_obj_escape_menu_container", pos1),
(set_container_overlay, "$g_presentation_obj_escape_menu_container"),
(assign, ":cur_y", 550), #500 add length with scrollbar
(create_text_overlay, reg0, "str_choose_an_option", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(create_button_overlay, "$g_presentation_obj_escape_menu_1", "str_choose_faction", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_1", 0xFFFFFF),
(try_begin),
(multiplayer_get_my_team, ":my_team"),
(lt, ":my_team", multi_team_spectator),
(create_button_overlay, "$g_presentation_obj_escape_menu_2", "@Choose Character", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_2", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_3", "@Choose Troops", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_3", 0xFFFFFF),
(try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_4", "str_options", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_4", 0xFFFFFF),
# (create_button_overlay, "$g_presentation_obj_escape_menu_5", "str_redefine_keys", 0),
# (overlay_set_color, "$g_presentation_obj_escape_menu_5", 0xFFFFFF),
(multiplayer_get_my_player, ":my_player_no"),
(try_begin),
# (this_or_next|eq, "$g_multiplayer_maps_voteable", 1),
# (this_or_next|eq, "$g_multiplayer_factions_voteable", 1),
# (this_or_next|gt, "$g_multiplayer_num_bots_voteable", 0),
(this_or_next|eq, "$g_multiplayer_kick_voteable", 1),
(eq, "$g_multiplayer_ban_voteable", 1),
(create_button_overlay, "$g_presentation_obj_escape_menu_6", "str_submit_a_poll", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_6", 0xFFFFFF),
(assign, "$g_presentation_obj_escape_menu_6_available", 1),
(try_begin),
(ge, ":my_player_no", 0),
(player_get_slot, ":last_poll_time", ":my_player_no", slot_player_poll_disabled_until_time),
(store_mission_timer_a, ":mission_timer"),
(lt, ":mission_timer", ":last_poll_time"),
(overlay_set_color, "$g_presentation_obj_escape_menu_6", 0x888888),
(overlay_set_hilight_color, "$g_presentation_obj_escape_menu_6", 0x888888),
(assign, "$g_presentation_obj_escape_menu_6_available", 0),
(try_end),
(try_end),
(try_begin),
(ge, ":my_player_no", 0),
(player_is_admin, ":my_player_no"),
(create_button_overlay, "$g_presentation_obj_escape_menu_7", "str_administrator_panel", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_7", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_8", "str_kick_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_8", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_9", "str_ban_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_9", 0xFFFFFF),
(try_begin),
(eq, "$coop_battle_started", 0),
(create_button_overlay, "$g_presentation_obj_escape_menu_10", "@Start Battle", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_10", 0xFFFFFF),
(try_end),
(try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_11", "str_mute_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_11", 0xFFFFFF),
(try_begin),
(assign, "$g_presentation_obj_escape_menu_12", -1),
(assign, ":any_muted", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(player_get_is_muted, ":is_muted", ":player_no"),
(eq, ":is_muted", 1),
(assign, ":any_muted", 1),
(try_end),
(eq, ":any_muted", 1),
(create_button_overlay, "$g_presentation_obj_escape_menu_12", "str_unmute_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_12", 0xFFFFFF),
(try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_13", "@Show Game Rules", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_13", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_14", "str_quit", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_14", 0xFFFFFF),
(try_begin),
(multiplayer_get_my_player, ":my_player_no"),
(player_get_team_no, ":my_team_no", ":my_player_no"),
(eq, ":my_team_no", 1),
(gt, "$coop_my_troop_no", 0),
(assign, ":stop", 0),
(try_for_agents, ":cur_agent"),
(eq, ":stop", 0),
(agent_is_human, ":cur_agent"),
(assign, ":stop", 1),
(try_end),
(eq, ":stop", 0),
(create_button_overlay, "$g_presentation_obj_escape_menu_15", "@Access Inventory (Buggy) ", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_15", 0xFFFFFF),
(try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_16", "@Toggle xp Messages", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_16", 0xFFFFFF),
(try_begin),
(ge, ":my_player_no", 0),
(player_is_admin, ":my_player_no"),
# (eq, "$g_round_ended", 1), #allow retreat early
(eq, "$coop_battle_started", 1),
(create_button_overlay, "$g_presentation_obj_escape_menu_17", "@End Battle", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_17", 0xFFFFFF),
(try_end),
###
(position_set_x, pos1, 130),
#
(try_begin),
(ge, "$g_presentation_obj_escape_menu_10", 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_10", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_1", pos1),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_2", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_2", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_3", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_3", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_15", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_15", pos1),
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_4", pos1),
# (val_sub, ":cur_y", escape_menu_item_height),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, "$g_presentation_obj_escape_menu_5", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_13", pos1),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_6", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_6", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_7", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_7", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_8", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_8", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_9", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_9", pos1),
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_11", pos1),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_12", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_12", pos1),
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_16", pos1),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_17", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_17", pos1),
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_14", pos1),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_begin),
(eq, ":object", "$g_presentation_obj_escape_menu_1"),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_team_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_2"),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_troop_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_3"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_commander_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_4"),
(presentation_set_duration, 0),
(change_screen_options),
# (else_try),
# (eq, ":object", "$g_presentation_obj_escape_menu_5"),
# (presentation_set_duration, 0),
# (change_screen_controls),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_6"),
(eq, "$g_presentation_obj_escape_menu_6_available", 1),
(presentation_set_duration, 0),
(start_presentation, "prsnt_multiplayer_poll_menu"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_7"),
(presentation_set_duration, 0),
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_open_admin_panel),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_8"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 3), #admin kick
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_9"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 4), #admin ban
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_10"),# start battle
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_start_battle),
(presentation_set_duration, 0),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_11"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 5), #mute player
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_12"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 6), #unmute player
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_13"),
(presentation_set_duration, 0),
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_open_game_rules),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_14"),
(presentation_set_duration, 0),
# (call_script, "script_game_quick_start"),
(try_begin),
(multiplayer_is_server),
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_end_battle),
(else_try),
(finish_mission, 0),
(try_end),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_15"),
(presentation_set_duration, 0),
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_open_inventory_before_spawn),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_16"),
(try_begin),
(eq, "$coop_toggle_messages", 0),
(assign, "$coop_toggle_messages", 1),
(display_message, "@Messages: off"),
(else_try),
(eq, "$coop_toggle_messages", 1),
(assign, "$coop_toggle_messages", 0),
(display_message, "@Messages: show xp"),
(try_end),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_17"),
(multiplayer_send_int_to_server, multiplayer_event_coop_send_to_server, coop_event_end_battle),
(assign, "$coop_battle_started", -1),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_escape_menu"),
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(key_clicked, key_escape),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("coop_stats_chart", prsntf_read_only|prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_score_b"),
(position_set_x, pos1, 100),
(position_set_y, pos1, 100),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(assign, ":team_1_rows", 0),
(assign, ":team_2_rows", 0),
(assign, ":spectator_rows", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(store_add, ":slot_index", ":player_no", multi_data_player_index_list_begin),
(try_begin),
(player_is_active, ":player_no"),
(troop_set_slot, "trp_multiplayer_data", ":slot_index", 1),
(player_get_team_no, ":player_team", ":player_no"),
(try_begin),
(eq, ":player_team", 0),
(val_add, ":team_1_rows", 1),
(else_try),
(eq, ":player_team", 1),
(val_add, ":team_2_rows", 1),
(else_try),
(eq, ":player_team", multi_team_spectator),
(val_add, ":spectator_rows", 1),
(try_end),
(else_try),
(troop_set_slot, "trp_multiplayer_data", ":slot_index", 0),
(try_end),
(try_end),
(try_begin),
# (gt, "$g_multiplayer_num_bots_team_1", 0),
(val_add, ":team_1_rows", 1),
(try_end),
(try_begin),
# (gt, "$g_multiplayer_num_bots_team_2", 0),
(val_add, ":team_2_rows", 1),
(try_end),
(assign, ":total_rows", ":team_1_rows"),
(val_max, ":total_rows", ":team_2_rows"),
(val_add, ":total_rows", ":spectator_rows"),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_stats_chart_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 100),
(position_set_y, pos1, 120),#120
(overlay_set_position, "$g_presentation_obj_stats_chart_container", pos1),
(position_set_x, pos1, 746),
(position_set_y, pos1, 530),#530
(overlay_set_area_size, "$g_presentation_obj_stats_chart_container", pos1),
(set_container_overlay, "$g_presentation_obj_stats_chart_container"),
(store_mul, ":y_needed", ":total_rows", 20),
(val_add, ":y_needed", 100),
(try_begin),
(gt, ":spectator_rows", 0),
(val_add, ":y_needed", 70),
(try_end),
(multiplayer_get_my_player, ":my_player_no"),
(try_begin),
(gt, ":y_needed", 490),
(assign, "$g_stats_chart_update_period", 8),
(else_try),
(assign, "$g_stats_chart_update_period", 1),
(try_end),
# (try_begin), #counting number of flags each team has.
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_headquarters),
# (call_script, "script_get_headquarters_scores"),
# (assign, ":team_1_num_flags", reg0),
# (assign, ":team_2_num_flags", reg1),
# (try_end),
#assuming only 2 teams in scene
(try_for_range, ":i_team", 0, multi_team_spectator),
(assign, ":number_of_players", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(player_get_team_no, ":team_no", ":player_no"),
(eq, ":team_no", ":i_team"),
(val_add, ":number_of_players", 1),
(try_end),
(assign, reg0, ":number_of_players"),
(try_begin),
(neq, ":number_of_players", 1),
(create_text_overlay, reg1, "str_reg0_players", 0),
(else_try),
(create_text_overlay, reg1, "str_reg0_player", 0),
(try_end),
(assign, ":cur_y", ":y_needed"),
(team_get_faction, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", ":i_team"),
(str_store_faction_name, s1, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18"),
(create_text_overlay, reg0, s1, 0),
(try_begin),
(eq, ":i_team", 0),
(overlay_set_color, reg0, 0xFF0000),
(overlay_set_color, reg1, 0xFF0000),
(else_try),
(overlay_set_color, reg0, 0x0099FF),
(overlay_set_color, reg1, 0x0099FF),
(try_end),
(assign, ":distance_between_teams", 373),
(store_mul, ":cur_x", ":distance_between_teams", ":i_team"),
(val_add, ":cur_x", 42),
# (store_add, ":cur_x_add_15", ":cur_x", 15),
# (position_set_x, pos3, ":cur_x_add_15"),
# (position_set_y, pos3, ":cur_y"),
(store_add, ":cur_x_add_35", ":cur_x", 0),
(position_set_x, pos1, ":cur_x_add_35"),
(position_set_y, pos1, ":cur_y"),
(copy_position, pos2, pos1),
(store_sub, ":cur_y_sub_10", ":cur_y", 10),
(position_set_x, pos2, ":cur_x_add_35"),
(position_set_y, pos2, ":cur_y_sub_10"),
(overlay_set_position, reg0, pos1),
(overlay_set_position, reg1, pos2),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(position_set_x, pos2, 600),
(position_set_y, pos2, 600),
(overlay_set_size, reg0, pos1),
(overlay_set_size, reg1, pos2),
# don't use the faction shield so the name will always show
# (team_get_faction, ":faction_of_team_1", 0),
# (team_get_faction, ":faction_of_team_2", 1),
# (try_begin),
# (eq, ":faction_of_team_1", ":faction_of_team_2"),
# (eq, ":i_team", 1),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_7"),
# (else_try),
# (eq, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", "fac_kingdom_4"),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_1"),
# (else_try),
# (eq, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", "fac_kingdom_2"),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_2"),
# (else_try),
# (eq, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", "fac_kingdom_3"),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_3"),
# (else_try),
# (eq, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", "fac_kingdom_5"),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_4"),
# (else_try),
# (eq, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", "fac_kingdom_6"),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_5"),
# (else_try),
# (eq, ":faction_of_troop_party_prisoner_stack_troop_id_script_param_1_leaded_party_2_var_18", "fac_kingdom_1"),
# (create_mesh_overlay, reg0, "mesh_ui_kingdom_shield_6"),
# (try_end),
# (position_set_x, pos1, 100),
# (position_set_y, pos1, 100),
# (overlay_set_position, reg0, pos3),
#this part removes the faction name if it can't find a shield
# (position_set_x, pos1, 50),
# (position_set_y, pos1, 50),
# (overlay_set_size, reg0, pos1),
#############
(assign, ":number_of_alive", 0),
(assign, ":number_of_alive_bots", 0),
(try_for_agents, ":cur_agent"),
(agent_is_human, ":cur_agent"),
(agent_is_alive, ":cur_agent"),
(agent_get_team, ":cur_agent_team", ":cur_agent"),
| |
for ghcnd plot')
pass
# yearly annotation
precip_yr = ser.sum()/(ser.index.date[-1]-ser.index.date[0]).days * 365.25
ax.text(0.22,0.98, f'{precip_yr:.2f} mm/year from {s} ghcnd sites',transform=ax.transAxes, fontsize=7, va='top')
ax.set_title(f'{sitename}: cumulative rainfall from nearby GHCND sites')
ax.set_xlabel('')
ax.set_ylabel(f'cumulative rain (mm)')
ax.set_ylim((0,None))
for i,key in enumerate(missing.keys()):
ax.text(0.99,0.01 + 0.03*i, missing[key],transform=ax.transAxes, fontsize=7, va='bottom',ha='right')
# ax.legend(loc='upper center',fontsize=7,bbox_to_anchor=(0.5,-0.05),ncol=2)
ax.legend(loc='upper left',fontsize=7)
fig.savefig(f'{sitepath}/precip_plots/{sitename}_ghcnd_cumulative_precip.{img_fmt}',bbox_inches='tight',dpi=150)
return
def write_ghcnd_precip(sitepath,sitename,ser):
assert len(ser) == ser.count(), 'precip still missing, add more sites'
assert ser.count() == len(ser), 'nan in rain obs'
assert any(ser.index.duplicated()) == False, 'rain obs has duplicate days'
ser.to_csv(f'{sitepath}/timeseries/{sitename}_ghcnd_precip.csv',header=True)
precip_yr = ser.sum()/(ser.index.date[-1]-ser.index.date[0]).days * 365.25
print('nearby met stations precip: %.2f mm/year' %(precip_yr))
return
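# Hedged worked example (illustrative, hypothetical data): the annual rate printed
# above scales the record total by 365.25 divided by the record length in days.
def _example_annual_precip_rate():
    import pandas as pd
    idx = pd.date_range('2020-01-01', '2020-12-31', freq='D')
    ser = pd.Series(2.0, index=idx)  # a constant 2 mm/day record
    days = (ser.index.date[-1] - ser.index.date[0]).days
    return ser.sum() / days * 365.25  # ~732.5 mm/year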
def plot_snow_partitioning(obs_ds,forcing_ds,era_ds,sitepath,sitename):
# sdate,edate = '2013-01-15', '2013-02-16'
sdate,edate = obs_ds.time_coverage_start,obs_ds.time_coverage_end
ts = obs_ds.timestep_interval_seconds
obs = obs_ds.sel(time=slice(sdate,edate))[['Rainf','Snowf','Tair']].squeeze().to_dataframe()
# obs = obs.resample('1H',closed='right',label='right').mean()
obs['precip'] = obs['Rainf']
# add snow to precip if recorded
idx = obs['Snowf'].dropna().index
obs.loc[idx,'precip'] = obs.loc[idx,'Rainf'] + obs.loc[idx,'Snowf']
fill = forcing_ds.sel(time=slice(sdate,edate))[['Rainf','Snowf','Tair']].squeeze().to_dataframe()
# fill = fill.resample('1H',closed='right',label='right').mean()
fill['precip'] = fill['Rainf'] + fill['Snowf']
era = era_ds.sel(time=slice(sdate,edate)).squeeze().to_dataframe()[['Rainf','Snowf','Tair']]
era = era.resample('30Min').asfreq()
era[['Rainf','Snowf']] = era[['Rainf','Snowf']].backfill()
era[['Tair']] = era[['Tair']].interpolate()
era['precip'] = era['Rainf'] + era['Snowf']
###############
plt.close('all')
fig, ax = plt.subplots(figsize=(8,4))
ax.set_title(f'Precipitation in {sitename}: corrected')
ax.set_ylabel('Water fluxes [mm]')
(obs['precip']*ts).cumsum().plot(ax=ax, color='k', lw=2, ls='solid',label='raw obs all precip')
(fill['precip']*ts).cumsum().plot(ax=ax, color='r', lw=1, ls='solid',label='forcing all precip')
(fill['Rainf']*ts).cumsum().plot(ax=ax, color='r', lw=1, ls='dashed',label='forcing Rainf')
(fill['Snowf']*ts).cumsum().plot(ax=ax, color='r', lw=1, ls='dotted',label='forcing Snowf')
# (era['precip']*ts).cumsum().plot(ax=ax, color='royalblue', lw=1, ls='solid',label='ERA5 all precip')
try:
fname = f'{sitepath}/timeseries/{sitename}_ghcnd_precip.csv'
ghcnd = pd.read_csv(fname,index_col=0,parse_dates=True)[sdate:edate]
ghcnd.rename(columns={'ghcnd':'GHCND all precip'},inplace=True)
ghcnd.cumsum().plot(ax=ax, color='purple', lw=1)
except:
print('GHCND data not found')
pass
ax2 = ax.twinx()
ax2.set_ylabel('Air temperature [°C]')
(fill['Tair'] - 273.15).plot(ax=ax2, color='0.75', ls='solid',lw=1,label='obs temperature')
ax.legend(loc='upper left', fontsize=8)
ax2.legend(loc='center right', fontsize=8)
ax.set_xlabel('')
ax.set_zorder(ax2.get_zorder()+1) # put ax in front of ax2
ax.patch.set_visible(False) # hide the 'canvas'
ax.set_ylim(0,None)
# plt.show()
fig.savefig(f'{sitepath}/precip_plots/{sitename}_snow_correction.{img_fmt}', dpi=150,bbox_inches='tight')
# plt.show()
plt.close('all')
return
def calc_MAE(sim,obs):
'''Calculate Mean Absolute Error from Best et al 2015'''
metric = abs(sim-obs).mean()
return metric
def calc_MBE(sim,obs):
'''Calculate Mean Bias Error from Best et al 2015'''
metric = np.mean(sim-obs)
return metric
def calc_NSD(sim,obs):
'''calculate normalised standard deviation'''
metric = sim.std()/obs.std()
return metric
def calc_R(sim,obs):
'''calculate normalised correlation coefficient (Pearson's)'''
metric = sim.corr(obs, method='pearson')
return metric
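# Hedged illustrative sketch (hypothetical series, not project data): combining the
# metric helpers above into a single summary dictionary.
def _example_metric_summary():
    import pandas as pd
    obs = pd.Series([1.0, 2.0, 3.0, 4.0])
    sim = obs + 0.5
    return {
        'MAE': calc_MAE(sim, obs),  # 0.5
        'MBE': calc_MBE(sim, obs),  # 0.5
        'NSD': calc_NSD(sim, obs),  # 1.0 (identical spread)
        'R': calc_R(sim, obs),      # 1.0 (perfect linear correlation)
    }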
###############################################################################
def calc_era5_linear_corrections(era_ds,watch_ds,obs_ds,siteattrs,sitedata):
sitename = siteattrs['sitename']
sitepath = siteattrs['sitepath']
lin_ds = era_ds.copy()
min_obs = 10
print('\ncorrecting Wind linearly')
if len(obs_ds['Wind_N'].to_series().dropna().unique()) > min_obs:
obs_wind = np.sqrt(obs_ds['Wind_N'].to_series()**2 + obs_ds['Wind_E'].to_series()**2)
era_wind = era_ds['Wind'].to_series()
lin_ds['Wind'].values = linear_debiasing('Wind',sitepath,era_wind,obs_wind)
print('')
print(f'mean observed wind speed: {obs_wind.mean():.2f} m/s')
print(f'mean wind speed change from {era_wind.mean():.2f} to {lin_ds.Wind.to_series().mean():.2f} m/s')
era_wdir = convert_uv_to_wdir(era_ds['Wind_E'],era_ds['Wind_N'])
lin_ds['Wind_E'].values = convert_wdir_to_uv(lin_ds['Wind'].values,era_wdir)[0]
lin_ds['Wind_N'].values = convert_wdir_to_uv(lin_ds['Wind'].values,era_wdir)[1]
################################################################################
for key in ['Tair','PSurf','Qair','SWdown']:
print(f'\ncorrecting {key} linearly')
if len(obs_ds[key].to_series().dropna().unique()) > min_obs:
era = era_ds[key].to_series()
obs = obs_ds[key].to_series()
lin_ds[key].values = linear_debiasing(key,sitepath,era,obs)
################################################################################
# fill NaN values in corrected dataset with zero
lin_ds['SWdown'].values = lin_ds['SWdown'].fillna(0.).values
# set negative values to zero
lin_ds['SWdown'].values = lin_ds['SWdown'].where(lin_ds['SWdown']>=0., 0.).values
# # setting very small values to zero
lin_ds.Rainf.values = lin_ds.Rainf.where(lin_ds.Rainf>1E-8,0.).values
lin_ds.Snowf.values = lin_ds.Snowf.where(lin_ds.Snowf>1E-8,0.).values
if sitename in ['JP-Yoyogi']:
lin_ds['Qair'].values = lin_ds['Qair'].where(lin_ds['Qair']>=0.0001, 0.0001).values
################################################################################
key = 'LWdown'
print(f'\ncorrecting {key} linearly')
if sitename == 'MX-Escandon': # MX-Escandon has no LW in 2011 obs period, using 2006 observed data for bias correction
print('Using LWdown from 2006 for bias correction at MX-Escandon')
# obs_LWdown = xr.open_dataset(f'{sitepath}/MX-Escandon_era5_corr_v2006.nc')['LWdown']
obs_LWdown = xr.open_dataset(f'{sitepath}/timeseries/MX-Escandon_raw2006_observations.nc')['LWdown']
else:
obs_LWdown = obs_ds['LWdown']
try:
print('remove spurious LWdown era5 value at 2010-11-27 09:00 (at many sites)')
before = era_ds['LWdown'].loc[dict(time='2010-11-27 08:00')]
after = era_ds['LWdown'].loc[dict(time='2010-11-27 10:00')]
era_ds['LWdown'].loc[dict(time='2010-11-27 09:00')] = 0.5*(before+after)
except Exception:
print('No correction done to ERA5 for LW on 2010-11-27 09:00 (not found)')
if len(obs_LWdown.to_series().dropna().unique()) > min_obs:
era = era_ds[key].to_series()
obs = obs_LWdown.to_series()
lin_ds[key].values = linear_debiasing(key,sitepath,era,obs)
print(f'mean Tair change from {era_ds["Tair"].mean().values-273.15:.1f} to {lin_ds["Tair"].mean().values-273.15:.1f} °C')
print(f'mean PSurf change from {era_ds["PSurf"].mean().values:.1f} to {lin_ds["PSurf"].mean().values:.1f} Pa')
print(f'mean Qair change from {era_ds["Qair"].mean().values:.4f} to {lin_ds["Qair"].mean().values:.4f} kg/kg')
print(f'mean SWdown change from {era_ds["SWdown"].mean().values:.1f} to {lin_ds["SWdown"].mean().values:.1f} W/m2')
print(f'mean LWdown change from {era_ds["LWdown"].mean().values:.1f} to {lin_ds["LWdown"].mean().values:.1f} W/m2')
################################################################################
# check corrected values fall within ALMA physical ranges
print('\nchecking corrections are within ALMA ranges...')
alma_ranges = pd.DataFrame({
'SWdown' : (0,1360),
'LWdown' : (0,750),
'Tair' : (213,333),
'Qair' : (0,0.03),
'PSurf' : (5000,110000),
'Rainf' : (0,0.02),
'Snowf' : (0,0.0085),
'Wind_N' : (-75,75),
'Wind_E' : (-75,75),
},index=('min','max'))
for key in alma_ranges.columns:
assert (lin_ds[key].values >= alma_ranges.loc['min',key]).all() and (lin_ds[key].values <= alma_ranges.loc['max',key]).all(), f'corrected {key} outside ALMA physical range: {float(lin_ds[key].min())}'
########## ANNOTATIONS ###########
lin_ds['era_lat'] = era_ds.latitude
lin_ds['era_lon'] = era_ds.longitude
lin_ds['era_wind_hgt'] = era_ds.era_wind_hgt
print('done correcting ERA5 data with obs')
return lin_ds
def linear_debiasing(flux,sitepath,era,obs):
'''
This function performs bias correction using the linear regression technique from Vuichard and Papale 2015 (FLUXNET)
DOI: 10.5194/essd-7-157-2015
'''
# # #### TESTING
# flux = 'LWdown'
# obs = clean_ds[flux].to_series()
# era = era_ds[flux].to_series()
# 0. match observed and reanalysis periods
obs_clean = obs.resample('H',closed='right',label='right').mean()
era_clean = era[obs_clean.index[0]:obs_clean.index[-1]].where(obs_clean.notna())
x = era_clean.dropna().values
y = obs_clean.dropna().values
# 1. allow intercept unless global radiation or wind (per Vuichard & Papale 2015)
if flux not in ['SWdown','Wind_N','Wind_E','Wind']:
x = sm.add_constant(x)
# 2. calculate regression parameters
model = sm.OLS(y,x)
intercept = model.fit().params[0]
slope = model.fit().params[1]
# 3. bias correct era5 per eq 4 in V&P2015
corr = slope*era + intercept
print(f'{flux} slope factor: {slope:.3f}, intercept: {intercept:.3f}')
else:
# 2. calculate regression parameters
model = sm.OLS(y,x)
slope = model.fit().params[0]
# 3. bias correct era5 per eq 4 in V&P2015
corr = slope*era
print(f'{flux} slope factor: {slope:.3f}')
return corr
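# Hedged illustration (not part of the original pipeline): a minimal sketch of how
# linear_debiasing is called, assuming pandas, numpy and statsmodels (as `sm`) are
# imported at module level as elsewhere in this file. The synthetic hourly series are
# hypothetical; `sitepath` is only passed through here.
def _example_linear_debiasing_usage():
    idx = pd.date_range('2020-01-01', periods=48, freq='H')
    era_example = pd.Series(np.linspace(270., 280., 48), index=idx)   # reanalysis Tair [K]
    obs_example = 1.05*era_example - 10.                              # observations with a linear bias
    corrected = linear_debiasing('Tair', './', era_example, obs_example)
    # for Tair an intercept is fitted, so corrected = slope*era + intercept ~ obs_example
    return corrected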
def calc_era5_corrections(era_ds,watch_ds,sitename,sitedata,sitepath,
plot_bias,obs_ds=None,ref_wind_hgt=wind_hgt):
'''takes era5 data (in alma format) and makes corrections based on site data '''
corr_ds = era_ds.copy()
print('\ncorrecting Rainf and Snowf')
try:
rain_obs = pd.read_csv(f'{sitepath}/timeseries/{sitename}_ghcnd_precip.csv',index_col=0,parse_dates=True)
# use controlled 10 yr period prior to observation
sdate = pd.Timestamp(obs_ds.time_coverage_end) - pd.DateOffset(years=10)
edate = pd.Timestamp(obs_ds.time_coverage_end)
# sdate, edate = rain_obs.index[0], rain_obs.index[-1]
span_years = (edate-sdate)/np.timedelta64(1, 'Y')
obs_total_precip_mm = rain_obs.loc[sdate:edate].values.sum()
print('')
print(f'available ghcnd years: {span_years:.2f}')
print('obs total precip %.2f mm (%s - %s) %.2f mm per year' %(obs_total_precip_mm, sdate.year,edate.year,obs_total_precip_mm/span_years))
# set ERA5 snowfall to zero during analysis period for Capitole (according to advice from Masson)
if sitename in ['FR-Capitole']:
era_ds['Snowf'].loc[dict(time=slice(obs_ds.time_coverage_start,obs_ds.time_coverage_end))] = 0.
corr_ds['Snowf'].loc[dict(time=slice(obs_ds.time_coverage_start,obs_ds.time_coverage_end))] = 0.
# tmp = era_ds.Rainf.sel(time=sdate),era_ds.Rainf.sel(time=edate)
era_total_rain_mm = era_ds.Rainf.sel(time=slice(sdate,edate)).values.sum()*3600
era_total_snow_mm = era_ds.Snowf.sel(time=slice(sdate,edate)).values.sum()*3600
era_total_precip_mm = era_total_snow_mm + era_total_rain_mm
print('')
print('era total rain %.2f mm (%s - %s) %.2f mm per year' %(era_total_rain_mm, sdate.year,edate.year,era_total_rain_mm/span_years))
print('era total snow %.2f mm (%s - %s) %.2f mm per year' %(era_total_snow_mm, sdate.year,edate.year,era_total_snow_mm/span_years))
print('era total precip %.2f mm (%s - %s) %.2f mm per year' %(era_total_precip_mm, sdate.year,edate.year,era_total_precip_mm/span_years))
try:
# tmp = watch_ds.Rainf.sel(time=sdate),watch_ds.Rainf.sel(time=edate)
watch_total_rain_mm = watch_ds.Rainf.sel(time=slice(sdate,edate)).values.sum()*3600
watch_total_snow_mm = watch_ds.Snowf.sel(time=slice(sdate,edate)).values.sum()*3600
watch_total_precip_mm = watch_total_snow_mm + watch_total_rain_mm
print('')
print('watch total rain %.2f mm (%s - %s) %.2f mm per year' %(watch_total_rain_mm, sdate.year,edate.year,watch_total_rain_mm/span_years))
print('watch total snow %.2f mm (%s - %s) %.2f mm per year' %(watch_total_snow_mm, sdate.year,edate.year,watch_total_snow_mm/span_years))
print('watch total precip %.2f mm (%s - %s) %.2f mm per year' %(watch_total_precip_mm, sdate.year,edate.year,watch_total_precip_mm/span_years))
except Exception:
print('no watch data found')
precip_corr_ratio = obs_total_precip_mm/era_total_precip_mm
print('era_corr = era x %.2f' %(precip_corr_ratio))
corr_ds['Rainf'].values = era_ds['Rainf'].values * precip_corr_ratio
corr_ds['Snowf'].values = era_ds['Snowf'].values * precip_corr_ratio
except Exception as e:
print('rain correction error:',e)
print('no GHCND precipitation file found, not undertaking bias correction')
# # setting very small values to zero
corr_ds.Rainf.values = corr_ds.Rainf.where(corr_ds.Rainf>1E-8,0.).values
corr_ds.Snowf.values = corr_ds.Snowf.where(corr_ds.Snowf>1E-8,0.).values
################################################################################
min_obs = 10
print('\ncorrecting Wind using log laws')
if len(obs_ds['Wind_N'].to_series().dropna().unique()) > min_obs:
print('finding effective era5 z0')
obs_wind = np.sqrt(obs_ds['Wind_N'].to_series()**2 + obs_ds['Wind_E'].to_series()**2)
era_wind = era_ds["Wind"].to_series().where(obs_wind.notna())
cor_wind = corr_ds["Wind"].to_series().where(obs_wind.notna())
eff_z0 = era_ds.fsr.values
bias = 0.5
print(f'mean observed wind speed: {obs_wind.mean():.2f} m/s')
# loop until mean corrected wind is close to obs wind
while abs(bias)>0.01:
print(f'trying z0: {eff_z0}')
cor_wind_eff = pd.Series(correct_wind(
ref_wind = cor_wind,
local_z0 = sitedata['roughness_length_momentum'],
local_d0 = sitedata['displacement_height'],
local_wind_hgt = sitedata['measurement_height_above_ground'],
ref_wind_hgt = era_ds.era_wind_hgt.values,
ref_z0 = eff_z0,
ref_d0 = 0,
mode = 0),index=cor_wind.index)
bias = cor_wind_eff.mean() - obs_wind.mean()
print(f'z0=eff: mean wind speed change from {era_wind.mean():.2f} to {cor_wind_eff.mean():.2f} m/s')
print(f'BIAS: {bias:.2f}, MAE: {calc_MAE(sim=cor_wind_eff,obs=obs_wind):.2f} m/s')
eff_z0 = round(eff_z0 - bias/5,3)
print('')
print(f'done finding effective era5 z0: {eff_z0}')
print(f'Converting ERA5 wind at {era_ds.era_wind_hgt.values}m height with grid {era_ds.fsr.values:.2f}m roughness, effective {eff_z0}m roughness')
print(f' to site {sitedata["measurement_height_above_ground"]}m height with {sitedata["roughness_length_momentum"]}m roughness, {sitedata["displacement_height"]:.2f}m displacement')
corr_ds['Wind_N'].values = correct_wind(
ref_wind = era_ds['Wind_N'].values,
local_z0 = sitedata['roughness_length_momentum'],
local_d0 = sitedata['displacement_height'],
local_wind_hgt = sitedata['measurement_height_above_ground'],
ref_wind_hgt = era_ds.era_wind_hgt.values,
ref_z0 = eff_z0,
ref_d0 = 0,
mode = 0)
corr_ds['Wind_E'].values = correct_wind(
ref_wind = era_ds['Wind_E'].values,
local_z0 = sitedata['roughness_length_momentum'],
local_d0 = sitedata['displacement_height'],
local_wind_hgt = sitedata['measurement_height_above_ground'],
ref_wind_hgt = era_ds.era_wind_hgt.values,
ref_z0 = eff_z0,
ref_d0 = 0,
mode = 0)
corr_ds['Wind'].values = np.sqrt(corr_ds['Wind_N'].values**2 + corr_ds['Wind_E'].values**2)
print('')
print(f'mean observed
import numpy as np
import torch.nn as nn
import torch
from torch.nn import functional as F
from scipy.spatial.transform import Rotation as R
from scipy.ndimage import gaussian_filter
import scipy.io as sio
def OnehotEncoding(arr, val, c):
# val = np.repeat(np.array(val).reshape(2, 2).T, 3).reshape(-1, 1)
ind = (arr - val[0]) // ((val[1] - val[0]) / (c - 1))
ind = ind.type(dtype=torch.long)
out = torch.zeros((c, 1))
out[ind, :] = 1
return out
def OnehotDecoding(arr, val, c):
# val = np.repeat(np.array(val).reshape(2, 2).T, 3).reshape(-1, 6)
out = (arr * ((val[1] - val[0]) / (c - 1))) + val[0]
return out
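# Hedged illustration (hypothetical range and class count): OnehotEncoding bins a scalar
# tensor into one of `c` classes spanning [val[0], val[1]], and OnehotDecoding maps a bin
# index back to the corresponding value, so the pair round-trips up to bin resolution.
def _example_onehot_roundtrip():
    val = (-15.0, 15.0)                                        # assumed parameter range
    encoded = OnehotEncoding(torch.tensor(0.0), val, c=31)     # bin 15 of 31 is set to 1
    decoded = OnehotDecoding(torch.argmax(encoded), val, c=31) # maps bin index back to ~0.0
    return encoded, decoded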
def ReadText(vis):
testwindow = vis.text("Hello World!")
return 0
def PlotImage(vis, img, win, env, title=""):
# img = img.detach.cpu().numpy()
win = vis.images(img, win=win, opts=dict(title=title), env=env)
return win
def PlotLoss(vis, x, y, win, env, legend, title=""):
if win is None:
win = vis.line(Y=y, X=x, win=win, opts=dict(title=title, legend=legend, showlegend=True), env=env)
else:
win = vis.line(Y=y, X=x, win=win, opts=dict(title=title, legend=legend, showlegend=True), env=env,
update='append')
# win = vis.line(Y=y, X=x, win=win, opts=dict(title=title, legend=['Train', 'Validation'], showlegend=True), update='append')
return win
def crop_image(image, label, j):
sz = image.size()
x = [x for x in range(sz[2] //128)]
y = [y for y in range(sz[3] //128)]
x = np.repeat(np.tile(x, (1, sz[2] //128)).reshape((-1)), image.size()[-1]//32 + 1)
y = np.repeat(y, sz[3] //128 * (image.size()[-1]//32 + 1))
z = [z for z in range(image.size()[-1]//32 + 1)]
z = np.tile(z, (1, sz[2] //128 * sz[3] //128)).reshape((-1))
if j % (image.size()[-1]//32 + 1) == image.size()[-1]//32:
img = image[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, -32:]
lb = label[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, -32:]
else:
img = image[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, z[j] * 32:(z[j] + 1) * 32]
lb = label[:, :, x[j] * 128:(x[j] + 1) * 128, y[j] * 128:(y[j] + 1) * 128, z[j] * 32:(z[j] + 1) * 32]
return img, lb
def normalization(input):
min = input.min()
input = input - min
max = input.max()
output = input / max
return output
def standardization(input):
mean = input.mean()
std = torch.std(input)
input = input - mean
output = input/std
return output
def CE(output, target, weights):
nll = nn.NLLLoss(weight=torch.Tensor([1, 7500]).float())
return nll(output, target)
def dice_loss(true, logits, eps=1e-7):
"""Computes the SørensenDice loss.
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: the Sørensen-Dice loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[torch.tensor(true.squeeze(1), dtype=torch.long)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
dice_loss = (2. * intersection / (cardinality + eps)).mean()
return (1 - dice_loss)
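# Hedged illustration (not from the original training code): a minimal sketch of the
# tensor shapes dice_loss expects in its multi-class branch. The random tensors are
# hypothetical stand-ins for network logits and integer ground-truth labels.
def _example_dice_loss_usage():
    logits = torch.randn(2, 3, 8, 8)              # [B, C, H, W] raw network output
    true = torch.randint(0, 3, (2, 1, 8, 8))      # [B, 1, H, W] integer class labels
    loss = dice_loss(true, logits)                # scalar in [0, 1]; lower is better
    return loss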
def cartesian_product(*arrays):
la = len(arrays)
dtype = np.result_type(*arrays)
arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
for i, a in enumerate(np.ix_(*arrays)):
arr[..., i] = a
return arr.reshape(-1, la)
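# Hedged illustration (hypothetical inputs): cartesian_product returns every combination
# of the 1-D input arrays as rows of an array of shape (prod(lengths), n_arrays), e.g.
#   cartesian_product(np.array([0, 1]), np.array([10, 20]))
#   -> [[ 0, 10], [ 0, 20], [ 1, 10], [ 1, 20]]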
# intersection function
def isect_line_plane_v3(p0, p1, p_co, p_no, epsilon=1e-6):
"""
p0, p1: Define the line.
p_co, p_no: define the plane:
p_co Is a point on the plane (plane coordinate).
p_no Is a normal vector defining the plane direction;
(does not need to be normalized).
Return a Vector or None (when the intersection can't be found).
"""
# # Test
# p0 = torch.tensor([[0, 0, 0], [0, 0, 0]], dtype=torch.float32).view(2, 3).T
# p1 = torch.tensor([[0, 0, 1], [1, 2, 3]], dtype=torch.float32).view(2, 3).T
#
# p_co = torch.tensor([20, 10, 30], dtype=torch.float32).view(3, 1)
# p_no = torch.tensor([0, 0, 10], dtype=torch.float32).view(3, 1)
# Normalize the normal vector of the plane
n = torch.norm(p_no, dim=0)
p_no = p_no / n
# Normalize the direction vector of the line and calculate degree between the normal vector and the direction vector
u = p1 - p0
n = torch.norm(u, dim=0)
u = u / n
dot = torch.mm(u.T, p_no)
# idx = np.where(abs(dot.cpu()) > torch.tensor(epsilon))[0]
# p0 = p0[:, idx]
# p1 = p1[:, idx]
# u = p1 - p0
# n = torch.norm(u, dim=0)
# u = u / n
# dot = torch.mm(u.T, p_no)
# The factor of the point between p0 -> p1 (0 - 1)
# if 'fac' is between (0 - 1) the point intersects with the segment.
# Otherwise:
# < 0.0: behind p0.
# > 1.0: infront of p1.
w = p0 - p_co
fac = -torch.mm(w.T, p_no) / dot
u = u * fac.T
vec = p0 + u
# tt = vec.cpu().numpy()
return vec
# ----------------------
# generic math functions
def dot_v3v3(v0, v1):
return (
(v0[:, 0] * v1[:, 0]) +
(v0[:, 1] * v1[:, 1]) +
(v0[:, 2] * v1[:, 2])
)
def len_squared_v3(v0):
return dot_v3v3(v0, v0)
def mul_v3_fl(v0, f):
return (
v0[0] * f,
v0[1] * f,
v0[2] * f,
)
def create_ranges_nd(start, stop, N, endpoint=True):
if endpoint==1:
divisor = N-1
else:
divisor = N
steps = (1.0/divisor) * (stop - start)
return start[...,None] + steps[...,None]*np.arange(N)
def DRR_generation(CT, R_pred, num, proj_pix):
"""
:param CT:
:param R_pred:
:param num:
:param R_:
:return:
"""
ct_pix = [512, 512]
min_v = torch.tensor(np.array([-(ct_pix[0]-1)/2, -(ct_pix[1]-1)/2, -(CT.size(1)-1)/2]), dtype=torch.float32).cuda(1)
max_v = torch.tensor(np.array([(ct_pix[0]-1)/2, (ct_pix[1]-1)/2, (CT.size(1)-1)/2]), dtype=torch.float32).cuda(1)
# Camera matrix
R_pred = R_pred.cpu().detach().numpy()
# R_pred = np.array([[15, -15, 0, 0, 0, 0]], dtype=np.float32)
# R_pred = R_.cpu().numpy()
Rx = R.from_euler('x', -R_pred[:, 0], degrees=True)
Ry = R.from_euler('y', -R_pred[:, 1], degrees=True)
Rz = R.from_euler('z', -R_pred[:, 2], degrees=True)
r = Rx * Ry * Rz
O = torch.tensor([0, 0, -160], dtype=torch.float32).view(3, 1, 1).cuda(1)
t = -O - torch.tensor(np.array([[R_pred[:, 3]], [R_pred[:, 4]], [R_pred[:, 5]]])).cuda(1)
# t = (t - (min_v.reshape(3, 1, 1) + max_v.reshape(3, 1, 1))/2) / ((max_v.reshape(3, 1, 1) - min_v.reshape(3, 1, 1))/2)
f = 256
n = 200
K = torch.tensor([[f, 0, proj_pix[0]/2], [0, f, proj_pix[1]/2], [0, 0, 1]], dtype=torch.float32).cuda(1)
rot = torch.tensor(r.as_dcm(), dtype=torch.float32).cuda(1)
## For visualization (1)
# s_min, s_max = 0, 200
# ss = 1
# img_pts = np.array([np.mgrid[1:proj_pix[1]+1, 1:proj_pix[0]+1].T.reshape(-1, 2)] * int(((s_max-s_min)/ss)))
# img_pts = torch.tensor(img_pts, dtype=torch.float32).view((-1, 2))
# s = torch.tensor(np.mgrid[s_min:s_max:ss].repeat(proj_pix[0] * proj_pix[1]), dtype=torch.float32)
# s = s.view((-1, 1))
# img_pts = torch.cat([img_pts*s, s], dim=-1).numpy()
# img_pts = img_pts.reshape((int((s_max - s_min) / ss), proj_pix[0], proj_pix[1], 3)).transpose((3, 0, 1, 2)).reshape(
# 3, -1, 1)
# img_pts = torch.tensor(np.tile(img_pts, (1, 1, num)).transpose((2, 0, 1))).cuda(1)
# backp = torch.matmul(torch.matmul(torch.inverse(rot), torch.inverse(K)),
# img_pts - torch.matmul(K, t.view((3, num))).T.reshape((num, 3, 1)))
# backp = backp.view((num, 3, int((s_max - s_min) / ss), -1)).permute((0, 3, 2, 1)) # num, -1, 200, 3
## Original Code (2)
img_pts = np.array([np.mgrid[1:proj_pix[1] + 1, 1:proj_pix[0] + 1].T.reshape(-1, 2)] * 2)
img_pts = torch.tensor(img_pts, dtype=torch.float32).view((-1, 2))
s = torch.tensor(np.mgrid[0:2:1].repeat(proj_pix[0] * proj_pix[1]), dtype=torch.float32)
s = s.view((-1, 1))
img_pts = torch.cat([img_pts*s, s], dim=-1).numpy()
img_pts = img_pts.reshape((2, proj_pix[0], proj_pix[1], 3)).transpose((3, 0, 1, 2)).reshape(3, -1, 1)
img_pts = torch.tensor(np.tile(img_pts, (1, 1, num)).transpose((2, 0, 1))).cuda(1)
backp = torch.matmul(torch.matmul(torch.inverse(rot), torch.inverse(K)),
img_pts - torch.matmul(K, t.view((3, num))).T.reshape((num, 3, 1)))
backp = backp.view((num, 3, 2, -1)).permute((0, 3, 2, 1)) # num, -1, 200, 3
# x = np.linspace(-ct_pix[0]/2, ct_pix[0]/2 -1, 512)
# y = np.linspace(-ct_pix[1] / 2, ct_pix[1] / 2 - 1, 512)
# z = np.linspace(-CT.size(1)/2, CT.size(1)/2-1, CT.size(1))
#
# tt = cartesian_product(x, y, z)
normals = torch.tensor([[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]], dtype=torch.float32).cuda(1)
pts = normals
n_backp = (backp - (min_v + max_v) / 2) / ((max_v - min_v) / 2)
t = -(t.view((3)) - (min_v + max_v) / 2)
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from embyapi.api_client import ApiClient
class LibraryServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_items(self, ids, **kwargs): # noqa: E501
"""Deletes an item from the library and file system # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_items(ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str ids: Ids (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_items_with_http_info(ids, **kwargs) # noqa: E501
else:
(data) = self.delete_items_with_http_info(ids, **kwargs) # noqa: E501
return data
def delete_items_with_http_info(self, ids, **kwargs): # noqa: E501
"""Deletes an item from the library and file system # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_items_with_http_info(ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str ids: Ids (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ids'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_items" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'ids' is set
if ('ids' not in params or
params['ids'] is None):
raise ValueError("Missing the required parameter `ids` when calling `delete_items`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('Ids', params['ids'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
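# Hedged illustration (not part of the generated client): a minimal sketch of calling the
# synchronous and asynchronous variants of delete_items. Server URL and authentication are
# assumed to be configured on the default ApiClient; the item ids are hypothetical.
#     api = LibraryServiceApi()                              # uses a default ApiClient()
#     api.delete_items('item-id-1,item-id-2')                # blocking request
#     thread = api.delete_items('item-id-3', async_req=True) # returns a request thread
#     result = thread.get()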
def delete_items_by_id(self, id, **kwargs): # noqa: E501
"""Deletes an item from the library and file system # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_items_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_items_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_items_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_items_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Deletes an item from the library and file system # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_items_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_items_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_items_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_albums_by_id_similar(self, id, **kwargs): # noqa: E501
"""Finds albums similar to a given album. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_albums_by_id_similar(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str include_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimited.
:param bool enable_images: Optional, include image information in output
:param bool enable_user_data: Optional, include user data
:param int image_type_limit: Optional, the max number of images to return, per image type
:param str enable_image_types: Optional. The image types to include in the output.
:param str user_id: Optional. Filter by user id, and attach user data
:param int limit: Optional. The maximum number of records to return
:param str fields: Optional. Specify additional fields of information to return in the output. This allows multiple, comma delimited. Options: Budget, Chapters, DateCreated, Genres, HomePageUrl, IndexOptions, MediaStreams, Overview, ParentId, Path, People, ProviderIds, PrimaryImageAspectRatio, Revenue, SortName, Studios, Taglines, TrailerUrls
:return: QueryResultBaseItemDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_albums_by_id_similar_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_albums_by_id_similar_with_http_info(id, **kwargs) # noqa: E501
return data
def get_albums_by_id_similar_with_http_info(self, id, **kwargs): # noqa: E501
"""Finds albums similar to a given album. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_albums_by_id_similar_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str include_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimited.
:param bool enable_images: Optional, include image information in output
:param bool enable_user_data: Optional, include user data
:param int image_type_limit: Optional, the max number of images to return, per image type
:param str enable_image_types: Optional. The image types to include in the output.
:param str user_id: Optional. Filter by user id, and attach user data
:param int limit: Optional. The maximum number of records to return
:param str fields: Optional. Specify additional fields of information to return in the output. This allows multiple, comma delimited. Options: Budget, Chapters, DateCreated, Genres, HomePageUrl, IndexOptions, MediaStreams, Overview, ParentId, Path, People, ProviderIds, PrimaryImageAspectRatio, Revenue, SortName, Studios, Taglines, TrailerUrls
:return: QueryResultBaseItemDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'include_item_types', 'enable_images', 'enable_user_data', 'image_type_limit', 'enable_image_types', 'user_id', 'limit', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_albums_by_id_similar" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_albums_by_id_similar`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
if 'include_item_types' in params:
query_params.append(('IncludeItemTypes', params['include_item_types'])) # noqa: E501
if 'enable_images' in params:
query_params.append(('EnableImages', params['enable_images'])) # noqa: E501
if 'enable_user_data' in params:
query_params.append(('EnableUserData', params['enable_user_data'])) # noqa: E501
if 'image_type_limit' in params:
query_params.append(('ImageTypeLimit', params['image_type_limit'])) # noqa: E501
if 'enable_image_types' in params:
query_params.append(('EnableImageTypes', params['enable_image_types'])) # noqa: E501
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
if 'limit' in params:
query_params.append(('Limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('Fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
"""
Various microopcodes for different ootypesystem-based backends
These microopcodes are used to translate from the ootype operations to
the operations of a particular backend. For an example, see
cli/opcodes.py which maps from ootype opcodes to sets of metavm
instructions.
See the MicroInstruction class for discussion on the methods of a
micro-op.
"""
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.extfunc import ExtFuncEntry, is_external
class Generator(object):
def add_comment(self, text):
"""
Called w/in a function w/ a text string that could be
usefully added to the output.
"""
pass
def add_section(self, text):
"""
Prints a distinguished comment
"""
self.add_comment("_" * 70)
self.add_comment(text)
def pop(self, TYPE):
""" Pops a value off the top of the stack, which is of the
given TYPE.
Stack: val, ... -> ..."""
raise NotImplementedError
def dup(self, TYPE):
""" Duplicates the top of the stack, which is of the given TYPE.
Stack: val, ... -> val, val, ..."""
raise NotImplementedError
def emit(self, instr, *args):
"""
Invoked by InstructionList.render() when we encounter a
non-MicroInstruction in the list of instructions. This is
typically used to encode small single operands as strings.
"""
pass
def load(self, v):
"""
Loads an item 'v' onto the stack
Stack: ... -> v, ...
"""
pass
def store(self, v):
"""
Stores an item from the stack into 'v'
Stack: value, ... -> ...
"""
pass
def set_field(self, CONCRETETYPE, fieldname):
"""
Stores a value into a field.
'CONCRETETYPE' should be the type of the class that has the field
'fieldname' is a string with the name of the field
Stack: value, item, ... -> ...
"""
raise NotImplementedError
def get_field(self, CONCRETETYPE, fieldname):
"""
Gets a value from a specified field.
'CONCRETETYPE' should be the type of the class that has the field
'fieldname' is the name of the field
Stack: item, ... -> ...
"""
raise NotImplementedError
def downcast(self, TYPE):
"""
Casts the object on the top of the stack to be of the specified
ootype. Assumed to raise an exception on failure.
Stack: obj, ... -> obj, ...
"""
raise NotImplementedError
def getclassobject(self, OOINSTANCE):
"""
Gets the class object for the OOINSTANCE. The type of the class
object will depend on the backend, of course; for example in JVM
it is java.lang.Class.
"""
raise NotImplementedError
def instantiate(self):
"""
Instantiates an instance of the Class object that is on top of
the stack. Class objects refers to an object representing a
class. Used to implement RuntimeNew.
Stack: class_obj, ... -> instance_obj, ...
"""
raise NotImplementedError
def instanceof(self, TYPE):
"""
Determines whether the object on the top of the stack is an
instance of TYPE (an ootype).
Stack: obj, ... -> boolean, ...
"""
pass
def branch_unconditionally(self, target_label):
""" Branches to target_label unconditionally """
raise NotImplementedError
def branch_conditionally(self, iftrue, target_label):
""" Branches to target_label depending on the value on the top of
the stack. If iftrue is True, then the branch occurs if the value
on top of the stack is true; if iftrue is false, then the branch
occurs if the value on the top of the stack is false
Stack: cond, ... -> ... """
raise NotImplementedError
def branch_if_equal(self, target_label):
"""
Pops two values from the stack and branches to target_label if
they are equal.
Stack: obj1, obj2, ... -> ...
"""
raise NotImplementedError
def call_graph(self, graph):
""" Invokes the function corresponding to the given graph. The
arguments to the graph have already been pushed in order
(i.e., first argument pushed first, etc). Pushes the return
value.
Stack: argN...arg2, arg1, arg0, ... -> ret, ... """
raise NotImplementedError
def prepare_generic_argument(self, ITEMTYPE):
"""
Invoked after a generic argument has been pushed onto the stack.
May not need to do anything, but some backends, *cough*Java*cough*,
require boxing etc.
"""
return # by default do nothing
def call_method(self, OOCLASS, method_name):
""" Invokes the given method on the object on the stack. The
this ptr and all arguments have already been pushed.
Stack: argN, arg2, arg1, this, ... -> ret, ... """
raise NotImplementedError
def prepare_call_primitive(self, op, module, name):
""" see call_primitive: by default does nothing """
pass
def call_primitive(self, op, module, name):
""" Like call_graph, but it has been suggested that the method be
rendered as a primitive. The full sequence for invoking a primitive:
self.prepare_call_primitive(op, module, name)
for each arg: self.load(arg)
self.call_primitive(op, module, name)
Stack: argN...arg2, arg1, arg0, ... -> ret, ... """
raise NotImplementedError
def prepare_call_oostring(self, OOTYPE):
" see call_oostring "
pass
def call_oostring(self, OOTYPE):
""" Invoked for the oostring opcode with both operands
(object, int base) already pushed onto the stack.
prepare_call_oostring() is invoked before the operands are
pushed."""
raise NotImplementedError
def prepare_call_oounicode(self, OOTYPE):
" see call_oounicode "
pass
def call_oounicode(self, OOTYPE):
""" Invoked for the oounicode opcode with the operand already
pushed onto the stack. prepare_call_oounicode() is invoked
before the operand is pushed. """
raise NotImplementedError
def new(self, TYPE):
""" Creates a new object of the given type.
Stack: ... -> newobj, ... """
raise NotImplementedError
def oonewarray(self, TYPE, length):
""" Creates a new array of the given type with the given length.
Stack: ... -> newobj, ... """
raise NotImplementedError
def push_null(self, TYPE):
""" Push a NULL value onto the stack (the NULL value represents
a pointer to an instance of OOType TYPE, if it matters to you). """
raise NotImplementedError
def push_primitive_constant(self, TYPE, value):
""" Push an instance of TYPE onto the stack with the given
value. TYPE will be one of the types enumerated in
oosupport.constant.PRIMITIVE_TYPES. value will be its
corresponding ootype implementation. """
raise NotImplementedError
def get_instrution_count(self):
"""
Return the number of opcodes in the current function, or -1
if the backend doesn't care about it. Default is -1
"""
return -1
class InstructionList(list):
def render(self, generator, op):
for instr in self:
if isinstance(instr, MicroInstruction):
instr.render(generator, op)
else:
generator.emit(instr)
def __call__(self, *args):
return self.render(*args)
class MicroInstruction(object):
def render(self, generator, op):
"""
Generic method which emits code to perform this microinstruction.
'generator' -> the class which generates actual code emitted
'op' -> the instruction from the FlowIR
"""
pass
def __str__(self):
return self.__class__.__name__
def __call__(self, *args):
return self.render(*args)
class _DoNothing(MicroInstruction):
def render(self, generator, op):
pass
class PushArg(MicroInstruction):
""" Pushes a given operand onto the stack. """
def __init__(self, n):
self.n = n
def render(self, generator, op):
generator.load(op.args[self.n])
class _PushAllArgs(MicroInstruction):
""" Pushes all arguments of the instruction onto the stack in order. """
def __init__(self, slice=None):
""" Eventually slice args
"""
self.slice = slice
def render(self, generator, op):
if self.slice is not None:
args = op.args[self.slice]
else:
args = op.args
for arg in args:
generator.load(arg)
class PushPrimitive(MicroInstruction):
def __init__(self, TYPE, value):
self.TYPE = TYPE
self.value = value
def render(self, generator, op):
generator.push_primitive_constant(self.TYPE, self.value)
class _StoreResult(MicroInstruction):
def render(self, generator, op):
generator.store(op.result)
class _SetField(MicroInstruction):
def render(self, generator, op):
this, field, value = op.args
## if field.value == 'meta':
## return # TODO
if value.concretetype is ootype.Void:
return
generator.load(this)
generator.load(value)
generator.set_field(this.concretetype, field.value)
class _GetField(MicroInstruction):
def render(self, generator, op):
# OOType produces void values on occasion that can safely be ignored
if op.result.concretetype is ootype.Void:
return
this, field = op.args
generator.load(this)
generator.get_field(this.concretetype, field.value)
class _DownCast(MicroInstruction):
""" Push the argument op.args[0] and cast it to the desired type, leaving
result on top of the stack. """
def render(self, generator, op):
RESULTTYPE = op.result.concretetype
generator.load(op.args[0])
generator.downcast(RESULTTYPE)
class _InstanceOf(MicroInstruction):
""" Push the argument op.args[0] and cast it to the desired type, leaving
result on top of the stack. """
def render(self, generator, op):
RESULTTYPE = op.result.concretetype
generator.load(op.args[0])
generator.instanceof(RESULTTYPE)
# There are three distinct possibilities where we need to map call differently:
# 1. Object is marked with rpython_hints as a builtin, so every attribute access
# and function call goes as builtin
# 2. Function called is a builtin, so it might be mapped to attribute access, builtin function call
# or even method call
# 3. Object on which method is called is primitive object and method is mapped to some
# method/function/attribute access
class _GeneralDispatcher(MicroInstruction):
def | |
# Repository: RichardLitt/Vesper
"""Module containing `Schedule` class."""
from collections import namedtuple
from threading import Event, Thread
import datetime
import itertools
import re
import jsonschema
import pytz
from vesper.util.notifier import Notifier
import vesper.ephem.ephem_utils as ephem_utils
import vesper.util.time_utils as time_utils
import vesper.util.yaml_utils as yaml_utils
# TODO: Consider creating a separate interval module, including intersection
# functions.
Interval = namedtuple('Interval', ('start', 'end'))
Transition = namedtuple('Transition', ('time', 'state'))
class Schedule:
"""
Sequence of UTC intervals.
A `Schedule` is a sequence of UTC time intervals, interpreted as a
function from UTC time to a boolean *state*. The schedule is `True`
or *on* from the start of each interval up to but not including the
end of the interval, and the schedule is `False` or *off* at all
other times. A schedule can also be interpreted as a sequence of
transitions, with a transition at each interval boundary from the
state approaching the boundary to the state at the boundary.
Transitions at the beginnings of intervals are from `False` to
`True`, and transitions at the ends of intervals are from `True`
to `False`.
"""
MIN_DATETIME = pytz.utc.localize(datetime.datetime.min)
MAX_DATETIME = pytz.utc.localize(datetime.datetime.max)
@staticmethod
def compile_yaml(spec, lat=None, lon=None, time_zone=None):
try:
spec = yaml_utils.load(spec)
except Exception as e:
raise ValueError(
'Could not load schedule YAML. Error message was: {}'.format(
str(e)))
return Schedule.compile_dict(spec, lat, lon, time_zone)
@staticmethod
def compile_dict(spec, lat=None, lon=None, time_zone=None):
context = _Context(lat, lon, time_zone)
return _compile_schedule(spec, context)
def __init__(self, intervals):
self._intervals = _normalize(intervals)
def get_intervals(self, start=None, end=None):
"""
Returns an iterator for the intervals of this schedule that
intersect the query interval [`start`, `end`]. For the purpose
of determining intersection, the schedule intervals are
considered to be closed at both ends.
"""
start, end = _complete_query_interval(start, end)
if start <= end:
# query interval is not empty
i = self._find_first_interval_with_end_ge(start)
intervals = self._intervals
while i != len(intervals) and intervals[i].start <= end:
yield intervals[i]
i += 1
def _find_first_interval_with_end_ge(self, dt):
"""
Returns the index of the first interval of this schedule whose end
is at least `dt`, or the number of intervals of the schedule if there
is no such interval.
"""
intervals = self._intervals
if len(intervals) == 0 or dt > intervals[-1].end:
# there is no interval of this schedule whose end is at least `dt`.
return len(intervals)
else:
# there is an interval of this schedule whose end is at least `dt`.
low = -1
high = len(intervals) - 1
# Invariant: index of first interval of this schedule whose
# end is at least `dt` is in (`low`, `high`].
while high != low + 1:
mid = (low + high) // 2
if dt > intervals[mid].end:
low = mid
else:
high = mid
return high
def get_transitions(self, start=None, end=None):
"""
Returns an iterator for the transitions of this schedule that
are in the query interval [`start`, `end`].
"""
start, end = _complete_query_interval(start, end)
for s, e in self.get_intervals(start, end):
if s >= start:
yield Transition(s, True)
if e <= end:
yield Transition(e, False)
def get_state(self, dt):
i = self._find_first_interval_with_end_ge(dt)
if i == len(self._intervals):
return False
else:
return self._intervals[i].start <= dt
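# Hedged illustration (not part of the original module): a minimal sketch of how a
# Schedule behaves as an on/off function of UTC time. The single interval below is
# hypothetical and uses the Interval namedtuple defined above.
def _example_schedule_usage():
    start = pytz.utc.localize(datetime.datetime(2020, 1, 1, 6))
    end = pytz.utc.localize(datetime.datetime(2020, 1, 1, 18))
    schedule = Schedule([Interval(start, end)])
    assert schedule.get_state(start)                                  # on at the interval start
    assert not schedule.get_state(end + datetime.timedelta(hours=1))  # off after the interval end
    # two transitions are reported: (start, True) and (end, False)
    return list(schedule.get_transitions())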
def _normalize(intervals):
if len(intervals) <= 1:
return tuple(intervals)
else:
# have at least two intervals
# Sort intervals by start time.
sorted_intervals = sorted(intervals, key=lambda i: i.start)
normalized_intervals = []
normalized_interval = sorted_intervals[0]
for interval in itertools.islice(sorted_intervals, 1, None):
if normalized_interval.end < interval.start:
# `normalized_interval` and `interval` do not intersect
normalized_intervals.append(normalized_interval)
normalized_interval = interval
else:
# `normalized_interval` and `interval` intersect
# Update end of `normalized_interval` if needed.
if interval.end > normalized_interval.end:
normalized_interval = normalized_interval._replace(
end=interval.end)
normalized_intervals.append(normalized_interval)
return tuple(normalized_intervals)
def _complete_query_interval(start, end):
if start is None:
start = Schedule.MIN_DATETIME
if end is None:
end = Schedule.MAX_DATETIME
return (start, end)
class ScheduleRunner(Thread):
def __init__(self, schedule):
super().__init__(daemon=True)
self._schedule = schedule
self._notifier = Notifier(schedule)
self._stop_event = Event()
self._terminated_event = Event()
def add_listener(self, listener):
self._notifier.add_listener(listener)
def remove_listener(self, listener):
self._notifier.remove_listener(listener)
def clear_listeners(self):
self._notifier.clear_listeners()
def run(self):
schedule = self._schedule
stop_event = self._stop_event
terminated_event = self._terminated_event
notify = self._notifier.notify_listeners
now = time_utils.get_utc_now()
state = schedule.get_state(now)
notify('schedule_run_started', now, state)
transitions = tuple(schedule.get_transitions(start=now))
for i, t in enumerate(transitions):
self._wait_for_transition_or_stop(t)
if stop_event.is_set():
# stop requested
# Because there are multiple threads at play, it is
# possible (though unlikely) that `now` follows or
# equals the times of one or more transitions in
# `transitions[i:]`, i.e. that the transitions have
# occurred but the schedule's listeners have not been
# notified of them. We perform the notifications here.
while i < len(transitions) and transitions[i].time <= now:
t = transitions[i]
notify('schedule_state_changed', t.time, t.state)
i += 1
now = time_utils.get_utc_now()
state = schedule.get_state(now)
notify('schedule_run_stopped', now, state)
terminated_event.set()
return
else:
notify('schedule_state_changed', t.time, t.state)
# If we get here, the schedule run completed. The schedule is off
# since we are at or past the end of every interval of the schedule.
now = time_utils.get_utc_now()
notify('schedule_run_completed', now, False)
terminated_event.set()
def _wait_for_transition_or_stop(self, t):
while True:
now = time_utils.get_utc_now()
seconds = (t.time - now).total_seconds()
if seconds <= 0:
# transition time reached
return
else:
# transition time not reached
# We limit the wait duration to avoid `OverflowError`
# exceptions that we have seen (admittedly for very
# large numbers of seconds) if we don't. We keep the
# maximum wait duration fairly small on the hunch that
# doing so might improve the accuracy of schedule
# transition notification times, at least on some
# platforms.
seconds = min(seconds, 5)
self._stop_event.wait(seconds)
if self._stop_event.is_set():
return
def stop(self):
self._stop_event.set()
def wait(self, timeout=None):
self._terminated_event.wait(timeout)
class ScheduleListener:
def schedule_run_started(self, schedule, time, state):
pass
def schedule_state_changed(self, schedule, time, state):
pass
def schedule_run_stopped(self, schedule, time, state):
pass
def schedule_run_completed(self, schedule, time, state):
pass
# The functions below compile schedules from dictionary schedule
# specifications to `Schedule` objects. There are two sets of functions
# involved, the *parse* functions and the *compile* functions. The parse
# functions parse schedule dates and/or times from strings, while the
# compile functions compile dictionary schedule specifications into
# `Schedule` objects. The parse functions are lower-level than the
# compile functions, and are invoked by them.
'''
Grammar for schedule dates and times:
date ::= yyyy-mm-dd
time ::= nonoffset_time | offset_time
nonoffset_time ::= time_24 | am_pm_time | time_name | solar_event_name
time_24 ::= h?h:mm:ss (with hour in [0, 23])
am_pm_time ::= time_12 am_pm
time_12 ::= h?h:mm:ss | h?h:mm | h?h (with hour in [1, 12])
am_pm ::= 'am' | 'pm'
time_name ::= 'noon' | 'midnight'
solar_event_name = 'sunrise' | 'sunset' | 'civil dawn' | 'civil dusk' |
'nautical dawn' | 'nautical dusk' | 'astronomical dawn' |
'astronomical dusk'
offset_time ::= offset preposition solar_event_name
offset ::= hhmmss_offset | units_offset
hhmmss_offset ::= h?h:mm:ss
units_offset ::= number units (with number 1 if units singular)
number ::= d+ | d+.d* | .d+
units ::= 'hours' | 'hour' | 'minutes' | 'minute' | 'seconds' | 'second'
preposition = 'before' | 'after'
date_time ::= date time
Time examples:
12:34:56
12 pm
3:45 am
noon
midnight
sunrise, sunset, etc.
1:00:00 before sunset
1 hour after sunset
2 hours after sunset
30 minutes after civil dusk
10 seconds before nautical dawn
Date/time examples:
2016-11-28 12:34:56
2016-11-28 12 pm
2016-11-28 noon
2016-11-28 sunset
2016-11-18 1 hour after sunset
Example schedules:
interval:
start: 2016-07-15 1 hour after sunset
end: 2016-07-16 30 minutes before sunrise
intervals:
- start: 2016-07-15 noon
duration: 1 hour
- start: 2016-07-15 2 pm
duration: 1 hour
daily:
start_time: 1 hour before sunrise
end_time: 2 hours after sunrise
start_date: 2016-07-15
end_date: 2016-10-15
union:
- intervals
...
- daily
...
Ideas not yet
# Repository: redhog/ferenda
# -*- coding: utf-8 -*-
# This fixture does a bunch of real HTTP request against a selected
# server (determined by the environment variable FERENDA_TESTURL,
# which is http://localhost:8000/ by default)
#
# When running against a local instance, it's important that this has
# been initialized with the documents in lagen/nu/res/scripts/testdata.txt
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
# sys
import os
import unittest
import codecs
import re
from urllib.parse import urljoin
from datetime import datetime
from urllib.parse import urlparse
# 3rdparty
from layeredconfig import LayeredConfig, Defaults
import requests
from bs4 import BeautifulSoup
from rdflib import Graph, URIRef
from rdflib.namespace import DCTERMS
# own
from ferenda.elements import Link, serialize
from ferenda.testutil import FerendaTestCase
from ferenda.sources.legal.se import RPUBL
from lagen.nu import SFS, LNKeyword
from lagen.nu.wsgiapp import WSGIApp
from ferenda import manager
class TestLagen(unittest.TestCase, FerendaTestCase):
baseurl = os.environ.get("FERENDA_TESTURL", "http://localhost:8000/")
def assert_status(self, url, code):
res = requests.get(url, headers={'Accept': 'text/html'})
self.assertEqual(res.status_code, code, url)
return res
def assert200(self, url):
return self.assert_status(url, 200)
def assert404(self, url):
return self.assert_status(url, 404)
def get(self, url, raise_for_status=False, **kwargs):
if 'headers' not in kwargs:
kwargs['headers']={'Accept': 'text/html'}
res = requests.get(url, **kwargs)
if raise_for_status:
res.raise_for_status()
return res
class TestPaths(TestLagen):
def test_frontpage(self):
self.assert200(self.baseurl)
def test_nonexist(self):
self.assert404(self.baseurl + "this-resource-does-not-exist")
def test_specific_sfs(self):
self.assert200(self.baseurl + "1999:175")
def test_specific_dv(self):
self.assert200(self.baseurl + "dom/nja/2015s180") # basefile HDO/Ö6229-14
def test_specific_sou(self):
self.assert200(self.baseurl + "sou/1997:39")
# test old-style URI (for a while)
self.assert200(self.baseurl + "utr/sou/1997:39")
def test_specific_prop(self):
self.assert200(self.baseurl + "prop/1997/98:44")
def test_specific_keyword(self):
self.assert200(self.baseurl + "begrepp/Personuppgift")
def test_specific_keyword_tricky(self):
self.assert200(self.baseurl + "begrepp/Sekundär_sekretessbestämmelse")
def test_facsimile_page(self):
res = self.get(self.baseurl + "sou/1997:39/sid557.png")
self.assertEqual(200, res.status_code)
self.assertEqual("image/png", res.headers["Content-Type"])
# assert trough first 8 bytes (magic number) that this really
# is a legit png
import binascii
self.assertEqual(b"89504e470d0a1a0a", binascii.hexlify(res.content[:8]))
# assert that the old-style URI still works (for a time)
res = self.get(self.baseurl + "utr/sou/1997:39/sid557.png")
self.assertEqual(200, res.status_code)
def test_feed_html(self):
self.assert200(self.baseurl + "dataset/sitenews/feed")
self.assert200(self.baseurl + "dataset/sfs/feed?rdf_type=type/forordning")
def test_feed_atom(self):
self.assert200(self.baseurl + "dataset/sitenews/feed.atom")
self.assert200(self.baseurl + "dataset/sfs/feed.atom?rdf_type=type/forordning")
def test_attached_css(self):
res = self.get(self.baseurl + "bolfs/2008:1")
self.assertEqual(200, res.status_code)
self.assertEqual("text/html; charset=utf-8", res.headers["Content-Type"])
self.assertIn('<link rel="stylesheet" href="/bolfs/2008:1?dir=parsed&attachment=index.css"/>', res.text[:1200])
res = self.get(self.baseurl + "bolfs/2008:1?dir=parsed&attachment=index.css")
self.assertEqual(200, res.status_code)
self.assertEqual("text/css; charset=utf-8", res.headers["Content-Type"])
class TestPages(TestLagen):
def test_doctype(self):
for doc in ("", "1999:175", "dom/nja/2015s180", "sou/1997:39", "prop/1997/98:44",
"begrepp/Personuppgift", "begrepp/Sekundär_sekretessbestämmelse"):
resp = self.get(self.baseurl + doc)
self.assertEqual('<!DOCTYPE html SYSTEM "about:legacy-compat">',
resp.text[:44], "Wrong doctype for %s" % doc)
def test_frontpage_links(self):
# <a> elements should have a href attribute (you'd think that
# was obvious, but it's not)
res = self.get(self.baseurl)
soup = BeautifulSoup(res.text, "lxml")
firstlink = soup.article.a
self.assertTrue(firstlink.get("href"))
def test_frontpage_disabled_links(self):
res = self.get(self.baseurl)
soup = BeautifulSoup(res.text, "lxml")
local = not os.environ.get("FERENDA_TESTURL")
if local:
# don't test for broken links in the main content area
# since there will be many (in local testing we only
# download a small subset of laws and other resources, and
# the main content area contains links to other
# resources. Thus, in this testing scenario, they're
# expected to be missing
soup.find("div", "section-wrapper").decompose()
for link in soup.find_all("a"):
self.assertNotIn("invalid-link", link.attrs.get('class', []), "Link %s marked as invalid (not in DB)" % link.text)
def test_css_link(self):
for link in ("", "dom/nja/2015s180", "dom/hfd/2015/not/1"):
url = self.baseurl + link
res = self.get(url)
soup = BeautifulSoup(res.text, "lxml")
cssref = soup.find("link", rel="stylesheet", href=re.compile("ferenda.css$"))['href']
cssurl = urljoin(url, cssref)
self.assertEqual(self.baseurl + "rsrc/css/ferenda.css",
cssurl, "Error for %s" % url)
def test_sfs_outline(self):
res = self.get(self.baseurl + "1999:175")
soup = BeautifulSoup(res.text, "lxml")
outlines = soup.find("nav", id="toc").find_all("li")
# make sure the outline navigation is as expected and hasn't
# been mangled.
self.assertIn('Ett offentligt rättsinformationssystem', outlines[0].a.text)
subheadings = outlines[1].find_all("li")
self.assertEqual(9, len(subheadings))
self.assertIn('Personuppgifter', subheadings[-1].a.text)
def test_paragrafbeteckning(self):
for doc in ("1949:105", "1999:175"):
res = self.get(self.baseurl + doc)
soup = BeautifulSoup(res.text, "lxml")
self.assertTrue(soup.find("a", "paragrafbeteckning"),
"%s lacks marked-up paragrafbeteckning" % doc)
class TestPatching(TestLagen):
def test_file_has_been_patched(self):
# the encoding parameter might be a py3-ism
needle = codecs.encode("Fjrebgrp", encoding="rot13")  # rot13 of a sensitive name
res = self.get(self.baseurl + "dom/nja/2002s35") # case containing sensitive info
res.raise_for_status() # req succeded
self.assertEqual(-1, res.text.find(needle)) # sensitive name is removed
self.assertTrue(res.text.index("alert alert-warning patchdescription")) # patching is advertised
class TestConNeg(TestLagen):
# this basically mirrors testWSGI.ConNeg
def test_basic(self):
res = self.get(self.baseurl + "1999:175")
self.assertEqual(200, res.status_code)
self.assertEqual("text/html; charset=utf-8", res.headers['Content-Type'])
def test_xhtml(self):
res = self.get(self.baseurl + "1999:175",
headers={'Accept': 'application/xhtml+xml'})
self.assertEqual(200, res.status_code)
self.assertEqual("application/xhtml+xml; charset=utf-8", res.headers['Content-Type'])
# variation: use file extension
res = self.get(self.baseurl + "1999:175.xhtml")
self.assertEqual(200, res.status_code)
self.assertEqual("application/xhtml+xml; charset=utf-8", res.headers['Content-Type'])
def test_rdf(self):
# basic test 3: accept: application/rdf+xml -> RDF statements (in XML)
res = self.get(self.baseurl + "1999:175",
headers={'Accept': 'application/rdf+xml'})
self.assertEqual(200, res.status_code)
self.assertEqual("application/rdf+xml; charset=utf-8", res.headers['Content-Type'])
# variation: use file extension
res = self.get(self.baseurl + "1999:175.rdf")
self.assertEqual(200, res.status_code)
self.assertEqual("application/rdf+xml; charset=utf-8", res.headers['Content-Type'])
def test_ntriples(self):
# transform test 4: accept: application/n-triples -> RDF statements (in NTriples)
# get the untransformed data to compare with
g = Graph().parse(data=self.get(self.baseurl + "1999:175.rdf", raise_for_status=True).text)
res = self.get(self.baseurl + "1999:175",
headers={'Accept': 'application/n-triples'})
self.assertEqual(200, res.status_code)
self.assertEqual("application/n-triples", res.headers['Content-Type'])
got = Graph().parse(data=res.content, format="nt")
self.assertEqualGraphs(g, got)
# variation: use file extension
res = self.get(self.baseurl + "1999:175.nt", raise_for_status=True)
self.assertEqual(200, res.status_code)
self.assertEqual("application/n-triples", res.headers['Content-Type'])
got = Graph()
got.parse(data=res.content, format="nt")
self.assertEqualGraphs(g, got)
def test_turtle(self):
# transform test 5: accept: text/turtle -> RDF statements (in Turtle)
g = Graph().parse(data=self.get(self.baseurl + "1999:175.rdf", raise_for_status=True).text)
res = self.get(self.baseurl + "1999:175",
headers={'Accept': 'text/turtle'})
self.assertEqual(200, res.status_code)
self.assertEqual("text/turtle; charset=utf-8", res.headers['Content-Type'])
got = Graph().parse(data=res.content, format="turtle")
self.assertEqualGraphs(g, got)
# variation: use file extension
res = self.get(self.baseurl + "1999:175.ttl")
self.assertEqual(200, res.status_code)
self.assertEqual("text/turtle; charset=utf-8", res.headers['Content-Type'])
got = Graph()
got.parse(data=res.content, format="turtle")
self.assertEqualGraphs(g, got)
def test_json(self):
# transform test 6: accept: application/json -> RDF statements (in JSON-LD)
g = Graph().parse(data=self.get(self.baseurl + "1999:175.rdf").text)
res = self.get(self.baseurl + "1999:175",
headers={'Accept': 'application/json'})
self.assertEqual(200, res.status_code)
self.assertEqual("application/json", res.headers['Content-Type'])
got = Graph().parse(data=res.text, format="json-ld")
self.assertEqualGraphs(g, got)
# variation: use file extension
res = self.get(self.baseurl + "1999:175.json")
self.assertEqual(200, res.status_code)
self.assertEqual("application/json", res.headers['Content-Type'])
got = Graph()
got.parse(data=res.text, format="json-ld")
self.assertEqualGraphs(g, got)
def test_unacceptable(self):
res = self.get(self.baseurl + "1999:175",
headers={'Accept': 'application/pdf'})
self.assertEqual(res.status_code, 406)
self.assertEqual("text/html", res.headers['Content-Type'])
# variation: unknown file extension should also be unacceptable
res = self.get(self.baseurl + "1999:175.pdf")
self.assertEqual(res.status_code, 406)
self.assertEqual("text/html", res.headers['Content-Type'])
def test_extended_rdf(self):
# extended test 6: accept: "/data" -> extended RDF statements
g = Graph().parse(data=self.get(self.baseurl + "1999:175/data.rdf").text)
res = self.get(self.baseurl + "1999:175/data",
headers={'Accept': 'application/rdf+xml'})
self.assertEqual(200, res.status_code)
self.assertEqual("application/rdf+xml; charset=utf-8", res.headers['Content-Type'])
got = Graph().parse(data=res.text)
self.assertEqualGraphs(g, got)
def test_extended_ntriples(self):
# extended test 7: accept: "/data" + "application/n-triples" -> extended
# RDF statements in NTriples
g = Graph().parse(data=self.get(self.baseurl + "1999:175/data.rdf").text)
res = self.get(self.baseurl + "1999:175/data",
headers={'Accept': 'application/n-triples'})
self.assertEqual(200, res.status_code)
self.assertEqual("application/n-triples", res.headers['Content-Type'])
got = Graph().parse(data=res.text, format="nt")
self.assertEqualGraphs(g, got)
# variation: use file extension
res = self.get(self.baseurl + "1999:175/data.nt")
self.assertEqual(200, res.status_code)
self.assertEqual("application/n-triples", res.headers['Content-Type'])
got = Graph().parse(data=res.text, format="nt")
self.assertEqualGraphs(g, got)
def test_extended_turtle(self):
# extended test 7: accept: "/data" + "text/turtle" -> extended
# RDF statements in Turtle
g = Graph().parse(data=self.get(self.baseurl + "1999:175/data.rdf").text)
res = self.get(self.baseurl + "1999:175/data",
headers={'Accept': 'text/turtle'})
self.assertEqual(200, res.status_code)
self.assertEqual("text/turtle; charset=utf-8", res.headers['Content-Type'])
got = Graph().parse(data=res.content, format="turtle")
self.assertEqualGraphs(g, got)
# variation: use file extension
res = self.get(self.baseurl + "1999:175/data.ttl")
self.assertEqual(200, res.status_code)
self.assertEqual("text/turtle; charset=utf-8", res.headers['Content-Type'])
got = Graph().parse(data=res.content, format="turtle")
self.assertEqualGraphs(g, got)
def test_dataset_html(self):
res = self.get(self.baseurl + "dataset/sfs")
self.assertEqual(res.status_code, 200)
self.assertEqual("text/html; charset=utf-8", res.headers['Content-Type'])
def test_dataset_html_param(self):
res = self.get(self.baseurl + "dataset/sfs?titel=P")
self.assertEqual(res.status_code, 200)
self.assertEqual("text/html; charset=utf-8", res.headers['Content-Type'])
self.assertIn('Författningar som börjar på "P"', res.text)
def test_dataset_ntriples(self):
res = self.get(self.baseurl + "dataset/sitenews",
headers={'Accept': 'application/n-triples'})
self.assertEqual(res.status_code, 200)
#self.assertEqual("application/n-triples", res.headers['Content-Type'])
#Graph().parse(data=res.text, format="nt")
res = self.get(self.baseurl + "dataset/sitenews.nt")
self.assertEqual(res.status_code, 200)
self.assertEqual("application/n-triples", res.headers['Content-Type'])
Graph().parse(data=res.text, format="nt")
def test_dataset_turtle(self):
res = self.get(self.baseurl + "dataset/sitenews",
headers={'Accept': 'text/turtle'})
self.assertEqual(res.status_code, 200)
self.assertEqual("text/turtle; charset=utf-8", res.headers['Content-Type'])
Graph().parse(data=res.text, format="turtle")
res = self.get(self.baseurl + "dataset/sitenews.ttl")
self.assertEqual(res.status_code, 200)
self.assertEqual("text/turtle; charset=utf-8", res.headers['Content-Type'])
Graph().parse(data=res.text, format="turtle")
def test_dataset_xml(self):
res = self.get(self.baseurl + "dataset/sitenews",
headers={'Accept': 'application/rdf+xml'})
self.assertEqual(res.status_code, 200)
self.assertEqual("application/rdf+xml; charset=utf-8", res.headers['Content-Type'])
Graph().parse(data=res.text)
res = self.get(self.baseurl + "dataset/sitenews.rdf")
self.assertEqual(res.status_code, 200)
self.assertEqual("application/rdf+xml; charset=utf-8", res.headers['Content-Type'])
Graph().parse(data=res.text)
def test_facsimile_page_ie_accept(self):
# IE uses this accept header, which triggered a 406 error from wsgiapp
# res = self.get(self.baseurl + "utr/sou/1997:39/sid557.png",
res = self.get(self.baseurl + "dir/2016:15/sid1.png",
headers={'Accept': "text/html, application/xhtml+xml, image/jxr, */*"})
self.assertEqual(200, res.status_code)
self.assertEqual("image/png", res.headers["Content-Type"])
# assert through the first 8 bytes (magic number) that this really
# is a legit PNG
import binascii
self.assertEqual(b"89504e470d0a1a0a", binascii.hexlify(res.content[:8]))
class TestAnnotations(TestLagen):
def test_inbound_links(self):
res = self.get(self.baseurl + "1949:105/data", True,
headers={'Accept': 'application/rdf+xml'})
graph = Graph().parse(data=res.text, format="xml")
resource = graph.resource(URIRef("https://lagen.nu/1949:105"))
self.assertEqual(str(resource.value(DCTERMS.title)), "Tryckfrihetsförordning (1949:105)")
# Assert a few things about inbound relations
# see if an expected legal case + inbound statute reference is
# as expected
resource = graph.resource(URIRef("https://lagen.nu/1949:105#K3P3"))
resource2 = next(x for x in resource.objects(RPUBL.isLagrumFor) if x._identifier | |
float(cv2.getTickCount())
assert scale_index <= nos
# TODO: take out rd_start
rd_start = 0
rd_end = len(r_harlocs) - 1
j = 1
"""
Inspired from
https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
BUT doesn't help in this case:
votes_space = np.asfortranarray(np.zeros( (len(RD), len(QD)) ))
"""
votes_space = np.zeros((len(r_harlocs), len(q_harlocs)))
# Make a distinct copy of HH from votes_space...
# TODO: use MAYBE even np.bool - OR take it out
HH = np.zeros((len(r_harlocs), len(q_harlocs)), dtype=np.int8)
# it helps to make the threshold stricter as the scale goes up
tolers = 0.1 - float(scale_index) / 100.0
maxdis = 3 + scale_index
maxori = 0.25
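# Worked example of the scale-dependent parameters above (illustrative only):
#   scale_index = 2 -> tolers = 0.1 - 0.02 = 0.08, maxdis = 5
#   scale_index = 5 -> tolers = 0.1 - 0.05 = 0.05, maxdis = 8
# i.e. the KD-tree ball radius shrinks (stricter matching) while the allowed
# max-distance difference grows as the Harris scale index increases.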
# TODO: I am using multiprocessing.Pool and returning votes; the dispatcher
# assembles the results, but the results are NOT the same as in the serial
# case - although they look pretty decent, they seem to be
# suboptimal - dp_alex returns a suboptimal cost path for
# USE_MULTITHREADING == True instead of False.
# (Note: running multiscale_quad_retrieval under the same preconditions,
# I got the same results in dp_alex().)
"""
if False: #config.USE_MULTITHREADING == True:
global g
g.r_quads_tree = r_quads_tree
g.r_harlocs = r_harlocs
g.q_harlocs = q_harlocs
g.md_threshold = md_threshold
g.st_threshold = st_threshold
g.all_ori = all_ori
g.all_id = all_id
g.all_max = all_max
g.all_cen = all_cen
g.nos = nos
g.scale_index = scale_index
g.crop_flag = crop_flag
g.sequence = sequence
g.RD_start = RD_start
g.RD_end = RD_end
g.maxdis = maxdis
g.maxori = maxori
g.tolers = tolers
#Start worker processes to use on multi-core processor (able to run
# in parallel - no GIL issue if each core has its own VM)
pool = multiprocessing.Pool(processes=config.numProcesses)
print("multiscale_quad_retrieval(): Spawned a pool of %d workers" %
config.numProcesses)
listParams = range(0, len(q_harlocs)) #!!!!TODO: use counterStep, config.initFrame[indexVideo]
#res = pool.map(iteration_standalone_mqr, listParams)
# See https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool
res = pool.map(func=iteration_standalone_mqr, iterable=listParams,
chunksize=1)
print("Pool.map returns %s" % str(res)) #x0.size + 1
# From https://medium.com/building-things-on-the-internet/40e9b2b36148
# close the pool and wait for the work to finish
pool.close()
pool.join()
# Doing the "reduce" phase after the workers have finished :)
assert len(res) == len(q_harlocs)
for query_frame, resE in enumerate(res):
resEIndex = resE[0]
resE = resE[1]
assert resEIndex == query_frame
# Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
#votes_space[:, query_frame - 1] = votes
votes_space[:, query_frame] = resE
for query_frame in range(len(q_harlocs)):
if crop_flag == 0:
HH[:, query_frame] = 1
else:
HH[:, query_frame] = spatial_consistency.spatial_consistency(space_xy,
qcen, len(r_harlocs), st_threshold, crop_flag)
try:
np.savez_compressed("votes_space%d" % scale_index, votes_space)
np.savez_compressed("HH%d" % scale_index, HH)
except:
common.DebugPrintErrorTrace()
return votes_space, HH
"""
"""
We substitute q - 1 with q, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
for query_frame in range(len(q_harlocs)):
common.DebugPrint("multiscale_quad_retrieval(): Starting iteration "
"query_frame = %d" % query_frame)
"""
We make pp reference the desired multiharloc list for the query video
frame query_frame
"""
pp = q_harlocs[query_frame]
points = pp[pp[:, 2] == scale_index, 0:2]
qout, qcen, qmaxdis, qori = findquads.findquads(points, md_threshold, 0)
if common.MY_DEBUG_STDOUT:
print("multiscale_quad_retrieval(): query_frame = %d, "
"qout.shape (number of quads for query frame query_frame) = "
"%s" % (query_frame, str(qout.shape)))
space_xy = np.zeros((qcen.shape[0], 2 * len(r_harlocs))) + np.nan
votes = np.zeros((len(r_harlocs), 1))
assert isinstance(tolers, float)
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): quads of query "
"frame %d are: " % query_frame)
common.DebugPrint(" qout = %s" % str(qout))
"""
Alex: for each quad (4 floats) of the query frame, taken from the Harris
features of scale scale_index.
Note: all_id stores the reference frame id for each quad descriptor.
"""
"""
We substitute queryFrameQuad - 1 with queryFrameQuad, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
for queryFrameQuad in range(qout.shape[0]):
common.DebugPrint("multiscale_quad_retrieval(): Starting iteration "
"queryFrameQuad = %d" % queryFrameQuad)
"""
Matlab's polymorphism is really confusing here: although tolers is
normally a float, Matlab treats it as a size-1 vector,
so there len(tolers) == 1.
"""
"""
We substitute tol_i - 1 with tol, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
for tol_i in range(1):
tol = tolers
# default for first PAMI with tol= 0.1 approximately
# NOTE: SciPy's KDTree finds a few more results, in some cases,
# than the Matlab code from Evangelidis.
# tol is a scalar representing the radius of the ball
if config.KDTREE_IMPLEMENTATION == 0:
idx = r_quads_tree.query_ball_point(qout[queryFrameQuad, :],
tol)
elif config.KDTREE_IMPLEMENTATION == 1:
pt = qout[queryFrameQuad, :]
pt = np.array([[pt[0], pt[1], pt[2], pt[3]]],
dtype=np.float32)
retval, idx, dists = r_quads_tree.radiusSearch(
query=pt,
radius=(tol ** 2),
maxResults=NUM_MAX_ELEMS,
params=search_params)
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): "
"radiusSearch's retval (at "
"query_frame=%d, queryFrameQuad=%d) "
"is %d" %
(query_frame, queryFrameQuad, retval))
idx = idx[0]
dists = dists[0]
"""
Note: retval is the number of neighbors returned from the
radiusSearch().
But the idx and the dists can have more elements than the
returned retval.
"""
idx = idx[: retval]
dists = dists[: retval]
if common.MY_DEBUG_STDOUT:
print("multiscale_quad_retrieval(): "
"qout[queryFrameQuad, :] = %s" %
str(qout[queryFrameQuad, :]))
print("multiscale_quad_retrieval(): "
"idx = %s" % str(idx))
print("multiscale_quad_retrieval(): "
"dists = %s" % str(dists))
print("multiscale_quad_retrieval(): "
"tol = %s" % str(tol))
if config.KDTREE_IMPLEMENTATION == 0:
print("multiscale_quad_retrieval(): "
"r_quads_tree.data[idx] = %s" %
str(r_quads_tree.data[idx]))
if common.MY_DEBUG_STDOUT:
a = qout[queryFrameQuad, :]
if config.KDTREE_IMPLEMENTATION == 0:
for myI, index in enumerate(idx):
b = r_quads_tree.data[index]
else:
pass
idx = np.array(idx)
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): "
"all_max.shape = %s" % str(all_max.shape))
common.DebugPrint("multiscale_quad_retrieval(): "
"qmaxdis.shape = %s" % str(qmaxdis.shape))
common.DebugPrint("multiscale_quad_retrieval(): "
"qmaxdis = %s" % str(qmaxdis))
common.DebugPrint("multiscale_quad_retrieval(): "
"qori.shape = %s" % str(qori.shape))
common.DebugPrint("multiscale_quad_retrieval(): "
"qori = %s" % str(qori))
if len(idx) == 0:
# NOT A GOOD IDEA: continue
dis_idx = np.array([])
ori_idx = np.array([])
else:
if common.MY_DEBUG_STDOUT:
print("multiscale_quad_retrieval(): "
"queryFrameQuad = %s" % str(queryFrameQuad))
print("multiscale_quad_retrieval(): "
"all_max[idx] = %s" % str(all_max[idx]))
print("multiscale_quad_retrieval(): "
"qmaxdis[queryFrameQuad] = %s" %
str(qmaxdis[queryFrameQuad]))
if USE_GPS_COORDINATES:
# We look only at a part of the reference video
"""
Since in some cases the video temporal alignment is
difficult to do due to similar portions in the
trajectory (see the drone videos, clip 3_some_lake)
we "guide" the temporal alignment by restricting
the reference frame search space - this is useful
when we have the geolocation (GPS) coordinate for
each frame.
"""
if common.MY_DEBUG_STDOUT:
print("multiscale_quad_retrieval(): "
"all_id = %s" % str(all_id))
if all_id.ndim == 2:
# TODO: put this at the beginning of the
# function
assert all_id.shape[1] == 1
"""
We flatten the array all_id
Note: We don't use order="F" since it's
basically 1-D array
"""
all_id = np.ravel(all_id)
# TODO: put start and end frame in config - or compute
# it from geolocation
sub_idx = np.logical_and((all_id[idx] >= 2030 - 928),
(all_id[idx] <= 2400 - 928))
idx = idx[sub_idx]
if common.MY_DEBUG_STDOUT:
print("multiscale_quad_retrieval(): "
"all_id = %s" % str(all_id))
print("multiscale_quad_retrieval(): "
"sub_idx = %s" % str(sub_idx))
print("multiscale_quad_retrieval(): "
"idx = %s" % str(idx))
if FILTER:
dis_idx = np.abs(
qmaxdis[queryFrameQuad] - all_max[idx]) < maxdis
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): "
"dis_idx = %s" % str(dis_idx))
idx = idx[dis_idx]
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): "
"idx (after idx = idx[dis_idx]) = "
"%s" % str(idx))
if FILTER:
ori_idx = np.abs(
qori[queryFrameQuad] - all_ori[idx]) < maxori
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): "
"ori_idx = %s" % str(ori_idx))
idx = idx[ori_idx]
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# spatio-temporal consistency
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# IMPORTANT ###################################################
if idx.size > 0:
if crop_flag == 0:
if FILTER:
"""
Alex: this is a simple procedure of eliminating
False Positive (FP) matches, as presented in
Section 4.2 of TPAMI 2013 paper.
Basically it filters out quad matches that have
centroids st_threshold away from the query quad.
Note: all_cen are the centroids of all reference
quads.
"""
dy = qcen[queryFrameQuad, 0] - all_cen[idx, 0]
dx = qcen[queryFrameQuad, 1] - all_cen[idx, 1]
D = dy ** 2 + dx ** 2
co_idx = D < pow(st_threshold, 2)
idx = idx[co_idx]
else:
"""
We substitute iii - 1 with iii, since we want
to number arrays from 0 | |
#!/usr/bin/env python
"""
This module contains the core Composable subclasses that we made to handle SchemaTables and Fields.
SchemaTables will always be unquoted and thus directly extend Composable, while Fields will always be quoted
and thus extend sql.Identifier.
"""
from typing import Union, List, Tuple, Dict, Any
from psycopg2 import sql, extensions
from general_utils.postgres_utils import LocalhostCursor
from general_utils.type_helpers import validate_is_int
class Field(sql.Identifier):
"""
A composable instance for a field that allows it to work as an element in a query using psycopg's sql module.
This is a clone of sql.Identifier except that it allows for an optional display name and it is hashable (as the
unquoted string value). This usefully allows us to store sets or dicts of Fields as if they were strings.
Note that by subclassing Identifier directly we inherit its useful ability to properly quote things with escaping.
"""
def __init__(self, name: str, display_name: Union[str, None] = None):
"""
Constructs a field with a raw name and an optional display name.
If no display_name is provided, then we will use the raw_name as the display name.
:param name: the name of the field (required)
:param display_name: the display name (optional)
"""
# This assigns the name to the "_wrapped" attribute
super().__init__(name)
self._display_name = display_name if display_name is not None else name
def __hash__(self):
"""
Implements hash for the Field by using the hash of its wrapped string
:return:
"""
return self._wrapped.__hash__()
@property
def name(self) -> str:
"""
Returns the field name (unquoted)
:return: the field name
"""
return self._wrapped
@property
def display_name(self) -> str:
"""
Returns the display name of the field
:return: the field display name
"""
return self._display_name
def clone_with_new_display_name(self, display_name: str) -> 'Field':
"""
Clones this field and changes its display name (keeps the field name)
:return: a new field instance with the same name but the new field display name
"""
return Field(self.name, display_name)
class Schema(sql.Identifier):
"""
A composable instance for a schema that allows it to work as an element in a query using psycopg's sql module.
Right now a pure clone of Identifier; it stores the constructor argument in the "_wrapped" property.
"""
class Table(sql.Identifier):
"""
A composable instance for a table that allows it to work as an element in a query using psycopg's sql module.
Right now a pure clone of Identifier; it stores the constructor argument in the "_wrapped" property.
Note that what we refer to as a "table" may actually be a view or a materialized view, but it queries just like a table,
so for our purposes it's a "table".
This class also gives us extra features, like the ability to get the metadata table if the table is a raw table.
"""
class SchemaTable(sql.Composed):
"""
A composable instance that takes schema and table strings (or Schema/Table objects) and allows them to work together.
This subclasses Composed since it involves two identifiers linked by sql.SQL('.'), and it
allows for convenient functionality to abstract away some nuances
(i.e. how you must write "schema"."table", since quoting "schema.table" as a single identifier does not work) and
gives you a container object if you ever want to get the schema or the table.
This class also allows us to get the metadata SchemaTable if appropriate.
"""
def __init__(self, schema: Union[str, Schema], table: Union[str, Table]):
if isinstance(schema, Schema):
self._schema = schema
elif isinstance(schema, str):
self._schema = Schema(schema)
else:
raise TypeError("schema must be a str or Schema, not a {}".format(type(table)))
if isinstance(table, Table):
self._table = table
elif isinstance(table, str):
self._table = Table(table)
else:
raise TypeError("table must be a str or Table, not a {}".format(type(table)))
# Store it as a composed of the schema identifier, the table identifier and a period in between
super().__init__([self._schema, sql.SQL("."), self._table])
@property
def string(self) -> str:
"""
"Returns an unwrapped string version of the schema table for ease of printing
:return: a string version of the schema table
"""
return self._schema.string + "." + self._table.string
@property
def schema(self) -> Schema:
"""
Returns a clone of schema (so that it can't be changed)
:return: a clone of the schema of this SchemaTable
"""
return Schema(self._schema.string)
@property
def table(self) -> Table:
"""
Returns a clone of table (so that it can't be changed)
:return: a clone of the table of this SchemaTable
"""
return Table(self._table.string)
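# Illustrative sketch (not used by the library): how a Field's hashability and a
# SchemaTable's composition are meant to be used. The schema, table and field names
# below are hypothetical.
def _example_field_and_schema_table_usage(cursor):
    # Fields hash like their unquoted names, so they can key a dict of descriptions.
    labels = {Field("user_id", "User"): "identifier column",
              Field("created_at"): "timestamp column"}
    # SchemaTable renders as "schema"."table" when composed into a query.
    target = SchemaTable("reporting", "users")
    query = sql.SQL("SELECT {fields} FROM {table}").format(
        fields=sql.SQL(", ").join(labels.keys()),
        table=target)
    cursor.execute(query)
    return cursor.fetchall()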
class SQLTypeStruct(sql.Composable):
"""
A composable instance that takes a postgres sqltype string and allows it to work as an element in a query
using psycopg's sql module
This is like Field / sql.Identifier, except it is an even more basic composable that does not put quotes around the
wrapped input (since SQL types can't be quoted).
It is intended for dynamic CREATE TABLE queries where we emit SQL types without quotes, and you should
only construct instances via the members of the SQLType container below.
"""
def as_string(self, context=None):
"""
Implement the abstract as_string to just give us the string that was given to be wrapped but without quotes.
This should be safe since we will only use one of the class methods
:param context: Don't need a context since it won't be quoted
:return: the string given in the constructor "wrapped" by this class
"""
return self._wrapped
class SQLType(object):
"""
An enum-like container for SQLTypeStruct objects.
In Python the only way to make a class return enum instances of itself is to use class methods, which are a bit
bulky, so it is easier to make this second object act as the enum, with class attributes that reference the
other object.
"""
TEXT = SQLTypeStruct("TEXT")
TEXT_PRIMARY_KEY = SQLTypeStruct("TEXT PRIMARY KEY")
DATE = SQLTypeStruct("DATE")
TIMESTAMP = SQLTypeStruct("TIMESTAMP")
JSONB = SQLTypeStruct("JSONB")
JSONB_DEFAULT_EMPTY_ARRAY = SQLTypeStruct("JSONB DEFAULT '[]'::json")
JSONB_DEFAULT_EMPTY_OBJ = SQLTypeStruct("JSONB DEFAULT '{}'::json")
BOOLEAN = SQLTypeStruct("BOOLEAN")
BOOLEAN_DEFAULT_TRUE = SQLTypeStruct("BOOLEAN DEFAULT TRUE")
BOOLEAN_DEFAULT_FALSE = SQLTypeStruct("BOOLEAN DEFAULT FALSE")
INTEGER = SQLTypeStruct("INTEGER")
INTEGER_DEFAULT_ZERO = SQLTypeStruct("INTEGER DEFAULT 0")
DOUBLE_PRECISION = SQLTypeStruct("DOUBLE PRECISION")
NUMERIC = SQLTypeStruct("NUMERIC")
@staticmethod
def NUMERIC_WITH_PRECISION_SCALE(precision: int, scale: int):
if precision is None or scale is None:
raise Exception("Must specify either both precision and scale or neither")
else:
return SQLTypeStruct("NUMERIC ({}, {})".format(precision, scale))
def get_column_names(schema_table: SchemaTable, cursor: extensions.cursor) -> List[str]:
"""
Gets a list of all columns (from the information schema) for a given schema and table in the ordinal order
:param schema_table: the SchemaTable object that we want to get the columns_from
:param cursor: a cursor for where to execute this query
:return: a list of all table columns
"""
schema_name = schema_table.schema.string
table_name = schema_table.table.string
cursor.execute("SELECT column_name FROM information_schema.columns "
"WHERE table_schema = %s AND table_name = %s ORDER BY ordinal_position",
(schema_name, table_name))
return [x[0] for x in cursor.fetchall()]
def execute_values_insert_query(schema_table: SchemaTable) -> sql.Composable:
"""
This helper function takes a SchemaTable and creates a generic insert query for use with the execute values method
(i.e. with the parameter %s following the word VALUES)
:param schema_table: the SchemaTable object to insert
:return: a Composable wrapper with the insert query
"""
return sql.SQL("""
INSERT INTO {} VALUES %s
""").format(schema_table)
def get_row_count(schema_table: SchemaTable, cursor: extensions.cursor) -> int:
"""
Given a SchemaTable and a cursor, this simple utility will run a SELECT COUNT(*) on the object and return an int
:param schema_table: the SchemaTable object that we want to compute the row count
:param cursor: a cursor for where to execute this query
:return: the number of rows in the schema table object after querying the database with the cursor
"""
cursor.execute(sql.SQL("""
SELECT COUNT(*) FROM {}
""").format(schema_table))
count = cursor.fetchone()[0] # grab the first element of the tuple that is returned
validate_is_int(count)
return count
def fetch_all_records(schema_table: SchemaTable, cursor: extensions.cursor) -> List:
"""
Given a SchemaTable and a cursor, this simple utility will run a SELECT * on the object and return the full thing in
memory. Recommended for use only on small objects!
:param schema_table: the SchemaTable object that we want to fetch all from
:param cursor: a cursor for where to execute this query
:return: a list of tuple records with the table in memory
"""
cursor.execute(sql.SQL("""
SELECT * FROM | |
'Use multi-part upload instead.' % bos.MAX_APPEND_OBJECT_LENGTH)
params = {'append': ''}
if offset is not None:
params['offset'] = offset
return self._send_request(
http_methods.POST,
bucket_name,
key,
body=data,
headers=headers,
params=params,
config=config)
@required(bucket_name=(str, str),
key=str,
data=(str, str))
def append_object_from_string(self, bucket_name, key, data,
content_md5=None,
offset=None,
content_type=None,
user_metadata=None,
content_sha256=None,
storage_class=storage_class.STANDARD,
user_headers=None,
config=None):
"""
Create an appendable object and put content of string to the object
or add content of string to an appendable object
"""
if isinstance(data, str):
data = data.encode(bceutils.DEFAULT_ENCODING)
fp = None
try:
fp = io.BytesIO(data)  # data is bytes at this point, so BytesIO (not StringIO)
if content_md5 is None:
content_md5 = bceutils.get_md5_from_fp(
fp, buf_size=self._get_config_parameter(config, 'recv_buf_size'))
return self.append_object(bucket_name=bucket_name,
key=key,
data=fp,
content_md5=content_md5,
content_length=len(data),
offset=offset,
content_type=content_type,
user_metadata=user_metadata,
content_sha256=content_sha256,
storage_class=storage_class,
user_headers=user_headers,
config=config)
finally:
if fp is not None:
fp.close()
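# Illustrative usage sketch (assuming a configured BosClient instance named
# `client`; bucket/key names are hypothetical): appending twice to the same
# object, passing the next write position via `offset`.
#
#   first = "hello "
#   client.append_object_from_string("my-bucket", "log.txt", first)
#   client.append_object_from_string("my-bucket", "log.txt", "world",
#                                    offset=len(first))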
@required(bucket_name=(str, str),
key=str,
data=object,
content_length=(int, int),
content_md5=str)
def put_object(self, bucket_name, key, data,
content_length,
content_md5,
content_type=None,
content_sha256=None,
user_metadata=None,
storage_class=storage_class.STANDARD,
user_headers=None,
config=None):
"""
Put an object, writing the content of the given file-like object to it
:type bucket_name: string
:param bucket_name: None
:type key: string
:param key: None
:type data: file-like object
:param data: None
:type content_length: long
:type content_md5: string
:return:
**HTTP Response**
"""
headers = self._prepare_object_headers(
content_length=content_length,
content_md5=content_md5,
content_type=content_type,
content_sha256=content_sha256,
user_metadata=user_metadata,
storage_class=storage_class,
user_headers=user_headers)
buf_size = self._get_config_parameter(config, 'recv_buf_size')
if content_length > bos.MAX_PUT_OBJECT_LENGTH:
raise ValueError('Object length should be less than %d. '
'Use multi-part upload instead.' % bos.MAX_PUT_OBJECT_LENGTH)
return self._send_request(
http_methods.PUT,
bucket_name,
key,
body=data,
headers=headers,
config=config)
@required(bucket=(str, str), key=str, data=(str, str))
def put_object_from_string(self, bucket, key, data,
content_md5=None,
content_type=None,
content_sha256=None,
user_metadata=None,
storage_class=storage_class.STANDARD,
user_headers=None,
config=None):
"""
Create object and put content of string to the object
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:type data: string
:param data: None
:return:
**HTTP Response**
"""
if isinstance(data, str):
data = data.encode(bceutils.DEFAULT_ENCODING)
fp = None
try:
fp = io.BytesIO(data)  # data is bytes at this point, so BytesIO (not StringIO)
if content_md5 is None:
content_md5 = bceutils.get_md5_from_fp(
fp, buf_size=self._get_config_parameter(config, 'recv_buf_size'))
return self.put_object(bucket, key, fp,
content_length=len(data),
content_md5=content_md5,
content_type=content_type,
content_sha256=content_sha256,
user_metadata=user_metadata,
storage_class=storage_class,
user_headers=user_headers,
config=config)
finally:
if fp is not None:
fp.close()
@required(bucket=str, key=str, file_name=str)
def put_object_from_file(self, bucket, key, file_name,
content_length=None,
content_md5=None,
content_type=None,
content_sha256=None,
user_metadata=None,
storage_class=storage_class.STANDARD,
user_headers=None,
config=None):
"""
Put object and put content of file to the object
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:type file_name: string
:param file_name: None
:type content_length: long
:param content_length: None
:return:
**HttpResponse Class**
"""
fp = open(file_name, 'rb')
try:
if content_length is None:
fp.seek(0, os.SEEK_END)
content_length = fp.tell()
fp.seek(0)
if content_md5 is None:
recv_buf_size = self._get_config_parameter(
config, 'recv_buf_size')
content_md5 = bceutils.get_md5_from_fp(fp, length=content_length,
buf_size=recv_buf_size)
if content_type is None:
content_type = bceutils.guess_content_type_by_file_name(
file_name)
return self.put_object(bucket, key, fp,
content_length=content_length,
content_md5=content_md5,
content_type=content_type,
content_sha256=content_sha256,
user_metadata=user_metadata,
storage_class=storage_class,
user_headers=user_headers,
config=config)
finally:
fp.close()
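# Illustrative usage sketch (assuming a configured BosClient instance named
# `client`; bucket/key/file names are hypothetical). Content length, MD5 and
# content type are derived from the file when not given explicitly:
#
#   client.put_object_from_file("my-bucket", "backups/data.bin", "/tmp/data.bin")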
@required(source_bucket_name=(str, str),
source_key=str,
target_bucket_name=(str, str),
target_key=str)
def copy_object(self,
source_bucket_name, source_key,
target_bucket_name, target_key,
etag=None,
content_type=None,
user_metadata=None,
storage_class=storage_class.STANDARD,
user_headers=None,
copy_object_user_headers=None,
config=None):
"""
Copy one object to another object
:type source_bucket: string
:param source_bucket: None
:type source_key: string
:param source_key: None
:type target_bucket: string
:param target_bucket: None
:type target_key: string
:param target_key: None
:return:
**HttpResponse Class**
"""
headers = self._prepare_object_headers(
content_type=content_type,
user_metadata=user_metadata,
storage_class=storage_class,
user_headers=user_headers)
headers[http_headers.BCE_COPY_SOURCE] = bceutils.normalize_string(
'/%s/%s' % (source_bucket_name, source_key), False)
if etag is not None:
headers[http_headers.BCE_COPY_SOURCE_IF_MATCH] = etag
if user_metadata is not None:
headers[http_headers.BCE_COPY_METADATA_DIRECTIVE] = 'replace'
else:
headers[http_headers.BCE_COPY_METADATA_DIRECTIVE] = 'copy'
if copy_object_user_headers is not None:
try:
headers = BosClient._get_user_header(
headers, copy_object_user_headers, True)
except Exception as e:
raise e
return self._send_request(
http_methods.PUT,
target_bucket_name,
target_key,
headers=headers,
config=config,
body_parser=bos_handler.parse_copy_object_response)
@required(bucket_name=(str, str))
def delete_object(self, bucket_name, key, config=None):
"""
Delete Object
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:return:
**HttpResponse Class**
"""
return self._send_request(http_methods.DELETE, bucket_name, key, config=config)
@required(bucket_name=(str, str), key_list=list)
def delete_multiple_objects(self, bucket_name, key_list, config=None):
"""
Delete Multiple Objects
:type bucket: string
:param bucket: None
:type key_list: string list
:param key_list: None
:return:
**HttpResponse Class**
"""
key_list_json = [{'key': k} for k in key_list]
return self._send_request(http_methods.POST,
bucket_name,
body=json.dumps({'objects': key_list_json}),
params={'delete': ''},
config=config)
@required(source_bucket=(str, str),
target_bucket=(str, str),
target_prefix=(str, str))
def put_bucket_logging(self,
source_bucket,
target_bucket,
target_prefix=None,
config=None):
"""
Put Bucket Logging
:type source_bucket: string
:param source_bucket: None
:type target_bucket: string
:param target_bucket: None
:return:
**HttpResponse Class**
"""
return self._send_request(http_methods.PUT,
source_bucket,
params={'logging': ''},
body=json.dumps({'targetBucket': target_bucket,
'targetPrefix': target_prefix}),
config=config)
@required(bucket_name=(str, str))
def get_bucket_logging(self, bucket_name, config=None):
"""
Get Bucket Logging
:type bucket_name: string
:param bucket_name: None
:return:
**HttpResponse Class**
"""
return self._send_request(http_methods.GET,
bucket_name,
params={'logging': ''},
config=config)
@required(bucket_name=(str, str))
def delete_bucket_logging(self, bucket_name, config=None):
"""
Delete Bucket Logging
:type bucket_name: string
:param bucket_name: None
:return:
**HttpResponse Class**
"""
return self._send_request(http_methods.DELETE,
bucket_name,
params={'logging': ''},
config=config)
@required(bucket_name=(str, str))
def initiate_multipart_upload(self,
bucket_name,
key,
storage_class=storage_class.STANDARD,
user_headers=None,
config=None):
"""
Initialize multi_upload_file.
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:return:
**HttpResponse**
"""
headers = {}
if storage_class is not None:
headers[http_headers.BOS_STORAGE_CLASS] = storage_class
if user_headers is not None:
try:
headers = BosClient._get_user_header(
headers, user_headers, False)
except Exception as e:
raise e
return self._send_request(
http_methods.POST,
bucket_name,
key,
headers=headers,
params={'uploads': ''},
config=config)
@required(bucket_name=(str, str),
key=str,
upload_id=(str, str),
part_number=int,
part_size=(int, int),
part_fp=object)
def upload_part(self, bucket_name, key, upload_id,
part_number, part_size, part_fp, part_md5=None,
config=None):
"""
Upload a part.
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:type upload_id: string
:param upload_id: None
:type part_number: int
:param part_number: None
:type part_size: int or long
:param part_size: None
:type part_fp: file pointer
:param part_fp: not None
:type part_md5: str
:param part_md5: None
:type config: dict
:param config: None
:return:
**HttpResponse**
"""
if part_number < bos.MIN_PART_NUMBER or part_number > bos.MAX_PART_NUMBER:
raise ValueError('Invalid part_number %d. The valid range is from %d to %d.' % (
part_number, bos.MIN_PART_NUMBER, bos.MAX_PART_NUMBER))
if part_size > bos.MAX_PUT_OBJECT_LENGTH:
raise ValueError('Single part length should be less than %d. '
% bos.MAX_PUT_OBJECT_LENGTH)
headers = {http_headers.CONTENT_LENGTH: part_size,
http_headers.CONTENT_TYPE: http_content_types.OCTET_STREAM}
if part_md5 is not None:
headers[http_headers.CONTENT_MD5] = part_md5
return self._send_request(
http_methods.PUT,
bucket_name,
key,
body=part_fp,
headers=headers,
params={'partNumber': part_number, 'uploadId': upload_id},
config=config)
@required(source_bucket_name=(str, str),
source_key=str,
target_bucket_name=(str, str),
target_key=str,
upload_id=(str, str),
part_number=int,
part_size=(int, int),
offset=(int, int))
def upload_part_copy(self,
source_bucket_name, source_key,
target_bucket_name, target_key,
upload_id, part_number, part_size, offset,
etag=None,
content_type=None,
user_metadata=None,
config=None):
"""
Copy part.
:type source_bucket_name: string
:param source_bucket_name: None
:type source_key: string
:param source_key: None
:type target_bucket_name: string
:param target_bucket_name: None
:type target_key: string
:param target_key: None
:type upload_id: string
:param upload_id: None
:return:
**HttpResponse**
"""
headers = self._prepare_object_headers(
content_type=content_type,
user_metadata=user_metadata)
headers[http_headers.BCE_COPY_SOURCE] = bceutils.normalize_string(
"/%s/%s" % (source_bucket_name, source_key), False)
range = """bytes=%d-%d""" % (offset, offset + part_size - 1)
headers[http_headers.BCE_COPY_SOURCE_RANGE] = range
if etag is not None:
headers[http_headers.BCE_COPY_SOURCE_IF_MATCH] = etag
return self._send_request(
http_methods.PUT,
target_bucket_name,
target_key,
headers=headers,
params={'partNumber': part_number, 'uploadId': upload_id},
config=config)
@required(bucket_name=(str, str),
key=str,
upload_id=(str, str),
part_number=int,
part_size=(int, int),
file_name=str,
offset=(int, int))
def upload_part_from_file(self, bucket_name, key, upload_id,
part_number, part_size, file_name, offset, part_md5=None,
config=None):
"""
:param bucket_name:
:param key:
:param upload_id:
:param part_number:
:param part_size:
:param file_name:
:param offset:
:param part_md5:
:param config:
:return:
"""
f = open(file_name, 'rb')
try:
f.seek(offset)
return self.upload_part(bucket_name, key, upload_id, part_number, part_size, f,
part_md5=part_md5, config=config)
finally:
f.close()
@required(bucket_name=(str, str),
key=str,
upload_id=(str, str),
part_list=list)
def complete_multipart_upload(self, bucket_name, key,
upload_id, part_list,
user_metadata=None,
config=None):
"""
After finish all the task, complete multi_upload_file.
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:type upload_id: string
:param upload_id: None
:type part_list: list
:param part_list: None
:return:
**HttpResponse**
"""
headers = self._prepare_object_headers(
content_type=http_content_types.JSON,
user_metadata=user_metadata)
return self._send_request(
http_methods.POST,
bucket_name,
key,
body=json.dumps({'parts': part_list}),
headers=headers,
params={'uploadId': upload_id})
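# Illustrative sketch of the full multipart flow (assuming a configured BosClient
# instance named `client`; names, sizes and the etag attribute on the part-upload
# response metadata are assumptions, not verified against this SDK version):
#
#   upload_id = client.initiate_multipart_upload("my-bucket", "big.bin").upload_id
#   part_list = []
#   for part_number, (offset, size) in enumerate(part_ranges, start=1):
#       resp = client.upload_part_from_file("my-bucket", "big.bin", upload_id,
#                                           part_number, size, "/tmp/big.bin", offset)
#       part_list.append({"partNumber": part_number, "eTag": resp.metadata.etag})
#   client.complete_multipart_upload("my-bucket", "big.bin", upload_id, part_list)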
@required(bucket_name=(str, str), key=str, upload_id=(str, str))
def abort_multipart_upload(self, bucket_name, key, upload_id, config=None):
"""
Abort a multipart upload that is in progress.
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:type upload_id: string
:param upload_id: None
:return:
**HttpResponse**
"""
return self._send_request(http_methods.DELETE, bucket_name, key,
params={'uploadId': upload_id})
@required(bucket_name=(str, str), key=str, upload_id=(str, str))
def list_parts(self, bucket_name, key, upload_id,
max_parts=None, part_number_marker=None,
config=None):
"""
List all the parts that have been uploaded successfully.
:type bucket: string
:param bucket: None
:type key: string
:param key: None
:type upload_id: string
:param upload_id: None
:type max_parts: int
:param max_parts: None
:type part_number_marker: string
:param part_number_marker: None
:return:
**_ListPartsResponse Class**
"""
params = {'uploadId': upload_id}
if max_parts is not None:
params['maxParts'] = max_parts
if part_number_marker is not None:
params['partNumberMarker'] = part_number_marker
return self._send_request(http_methods.GET, bucket_name, key, params=params, config=config)
@required(bucket_name=(str, str), key=str, upload_id=(str, str))
| |
(8,"(/d 1d 2)",None)]:
T(f"{n}-gon {l}{'' if s is None else ' slanted '+str(s)}",
f"""
({'' if s is None else slant(s)}
(loop i {n}
(move {l} (/a 1a {n}))))
""",
needToTrain=True)
for n,l,s in [(3,"(*d 1l 2)",None),
(4,"(*d 1d 4)",None),
(5,"(*d 1d 2)",None),
(6,"1l",None),
(7,"(*d 1d 3)",None),
(8,"1l",3)]:
T(f"{n}-gon {l}{'' if s is None else ' slanted '+str(s)}",
f"""
({'' if s is None else slant(s)}
(loop i {n}
(move {l} (/a 1a {n}))))
""",
needToTrain=False)
T("upwards", "((move 0d (/a 1a 4)) (move 1d 0a))",
needToTrain=True)
T("right angle", "((move (*d 1d 2) (/a 1a 4)) (move 1d 0a))",
needToTrain=True)
T("right angle epsilon", "((move epsilonLength (/a 1a 4)) (move epsilonLength 0a))",
needToTrain=True)
T("line segment", "(move 1d 0a)",
needToTrain=True)
T("square slanted by 2pi/3",
"""((move 0d (/a 1a 3))
(loop k 4 (move 1d (/a 1a 4))))""",
needToTrain=True)
T("semicircle slanted by 2pi/5",
"""((move 0d (/a 1a 5))
(loop i infinity
(move (*d epsilonLength 4) epsilonAngle)))""",
needToTrain=True)
T("Greek spiral slanted by 2pi/6",
"""((move 0d (/a 1a 6))
(loop i 7 (move (*l 1l i) (/a 1a 4))))""",
needToTrain=True)
T("Hook slanted by 2pi/7",
"""((move 0d (/a 1a 7))
(move 1d 0a)
(loop i infinity
(move (*d epsilonLength 4) epsilonAngle)))""",
needToTrain=True)
T("""slanted line""",
"""((move 0d (/a 1a 8))
(move (*d 1l 3) 0a))""",
needToTrain=True)
for i in [6,7,8,9]:
T("Greek spiral %d"%i,
"""
(loop i %d
(move (*l 1l i) (/a 1a 4)))
"""%i,
needToTrain=i in [7,8])
for i in [2,3,4,5]:
T("smooth spiral %d"%i,
"""
(loop i infinity
(move (*d epsilonLength i) (*a epsilonAngle %d)))
"""%i,
needToTrain=i in [3,5])
T("smooth spiral 4 slanted by 2pi/2",
"""
((move 0d (/a 1a 2))
(loop i infinity
(move (*d epsilonLength i) (*a epsilonAngle 4))))
""",
needToTrain=True)
for i in [3,5,7,9]:
T("star %d"%i,
"""
(loop i %d (move (*d 1d 4) (-a (/a 1a 2) (/a (/a 1a 2) %s))))
"""%(i,i),
needToTrain=i in [5,9])
T("leaf iteration 1.1",
"""
(loop i infinity (move epsilonDistance (/a epsilonAngle 2)))
""",
needToTrain=True)
T("leaf iteration 1.2",
"""
((move 0d (/a 1a 2))
(loop i infinity (move epsilonDistance (/a epsilonAngle 2))))
""",
needToTrain=True)
T("leaf iteration 2.1",
"""
(loop n 2
(loop i infinity (move epsilonDistance (/a epsilonAngle 2)))
(move 0d (/a 1a 4)))
""",
needToTrain=True)
T("leaf iteration 2.2",
"""
((move 0d (/a 1a 2))
(loop n 2
(loop i infinity (move epsilonDistance (/a epsilonAngle 2)))
(move 0d (/a 1a 4))))
""",
needToTrain=True)
for n in range(3,8):
T("flower %d"%n,
"""
(loop j %d
(loop n 2
(loop i infinity (move epsilonDistance (/a epsilonAngle 2)))
(move 0d (/a 1a 4)))
(move 0d (/a 1a %d)))
"""%(n,n),
needToTrain=n in range(3,5))
for n in [5,6]:
T("staircase %d"%n,
"""
(loop i %d
(move 1d (/a 1a 4))
(move 1d (/a 1a 4))
(move 0d (/a 1a 2)))
"""%n,
needToTrain=n in [5])
for n in range(1,6):
T("blocks zigzag %d"%n,
"""
(loop i %d
(move 1d (/a 1a 4)) (move 1d (/a 1a 4))
(move 1d (+a (/a 1a 2) (/a 1a 4))) (move 1d (+a (/a 1a 2) (/a 1a 4))))
"""%n,
needToTrain=n in [1,2,3])
for n in [3,4]:#range(1,5):
T("diagonal zigzag %d"%n,
"""
((move 0d (/a 1a 8))
(loop i %d
(move 1d (/a 1a 4))
(move 1d (+a (/a 1a 2) (/a 1a 4)))))
"""%n,
needToTrain=n == 4)
for n in [1,2,3,4,5,6]:
T("right semicircle of size %d"%n,
"""
(loop i infinity
(move (*d epsilonLength %d) (-a 0a epsilonAngle)))
"""%n,
needToTrain=n%2 == 0)
T("left semicircle of size %d"%n,
f"""
({'' if n != 1 else slant(8)}
(loop i infinity
(move (*d epsilonLength {n}) epsilonAngle)))
""",
needToTrain=n%2 == 1)
T("circle of size %d"%n,
"""
((loop i infinity
(move (*d epsilonLength %d) epsilonAngle))
(loop i infinity
(move (*d epsilonLength %d) epsilonAngle)))
"""%(n,n),
needToTrain=n in [1,4,3,5,6])
for n in [5,6]:
T("%d enclosed circles"%n,
"""
(loop j %d
(loop i infinity
(move (*d epsilonLength j) epsilonAngle))
(loop i infinity
(move (*d epsilonLength j) epsilonAngle)))"""%n,
needToTrain=n == 5)
for n,l in [(4,2),
(5,3),
(6,4),
(3,1)]:
T("%d-circle flower l=%d"%(n,l),
"""
(loop j %d
(move 0d (/a 1a %d))
(embed (loop i infinity
(move (*d epsilonLength %d) epsilonAngle))
(loop i infinity
(move (*d epsilonLength %d) epsilonAngle))))"""%(n,n,l,l),
needToTrain=(n,l) in [(6,4),(3,1)])
for n,l in [(3,1),(2,2),(1,3),
(2,1),(1,2),(1,1)]:
T("%d-semicircle sequence L=%d"%(n,l),
"""
(loop j %d
(loop i infinity
(move (*d epsilonLength %d) epsilonAngle))
(loop i infinity
(move (*d epsilonLength %d) (-a 0a epsilonAngle))))
"""%(n,l,l),
needToTrain=(n,l) in [(3,1),(2,2),(1,3)])
for n,l in [(2,"1d"),
(3,"1d")]:
T("row of %d circles"%n,
"""
(loop j %d
(embed (loop k 2 (loop i infinity (move epsilonLength epsilonAngle))))
(p (move %s 0a)))"""%(n,l),
needToTrain=n == 2)
for n,l in [(2,"1d"),
(3,"1d")]:
T("row of %d lines"%n,
"""
(loop j %d
(move 1d 0a)
(p (move %s 0a)))"""%(n,l),
needToTrain=n == 2)
T("line next to semicircle",
"""
((move 1d 0a) (p (move 1d 0a)) (loop i infinity (move epsilonLength epsilonAngle)))
""",
needToTrain=True)
for n,l in [(3,"(/d 1d 2)"),
(4,"(/d 1d 3)")]:
T("%d dashed lines of size %s"%(n,l),
"""(loop i %d (p (move 1d 0a)) (move %s 0a))"""%(n,l),
needToTrain=n == 3)
T("broken circle",
"""
((loop i infinity (move epsilonLength epsilonAngle)) (p (move 1d 0a)) (loop i infinity (move epsilonLength epsilonAngle)))
""",
needToTrain=True)
T("circle next to semicircle",
"""
((loop i infinity (move epsilonLength epsilonAngle))
(loop i infinity (move epsilonLength epsilonAngle))
(p (move 1d 0a))
(loop i infinity (move epsilonLength epsilonAngle)))
""",
needToTrain=True)
T("semicircle next to square",
"""
((loop i infinity (move epsilonLength epsilonAngle))
(p (move 1d 0a))
(loop i infinity (move 1d (/a 1a 4))))
""",
needToTrain=False)
T("circle next to square",
"""
((loop i infinity (move epsilonLength epsilonAngle))
(loop i infinity (move epsilonLength epsilonAngle))
(p (move 1d 0a))
(loop i infinity (move 1d (/a 1a 4))))
""",
needToTrain=False)
T("circle next to line",
"""
((loop i infinity (move epsilonLength epsilonAngle))
(loop i infinity (move epsilonLength epsilonAngle))
(p (move 1d 0a))
(move 1d 0a))
""",
needToTrain=True)
T("line next to circle",
"""
((move 1d 0a)
(p (move 1d 0a))
(loop i infinity (move epsilonLength epsilonAngle))
(loop i infinity (move epsilonLength epsilonAngle))
(move 1d 0a))
""",
needToTrain=True)
for n,l in [(4,"1d"),
(5,"1d")]:
T("row of %d dashes"%n,
"""
(loop j %d
(embed (move 0d (/a 1a 4)) (move 1d 0a))
(p (move %s 0a)))"""%(n,l),
needToTrain=n == 4)
for n,l in [(5,"1d"),(6,"1d")]:
T("row of %d semicircles"%n,
"""
(loop j %d
(embed (loop i infinity (move epsilonLength epsilonAngle)))
(p (move %s 0a)))"""%(n,l),
needToTrain=n == 5)
with random_seed(42): # carefully selected for maximum entropy
for n in [3,4,5,6,7]:
body = {"empty": "(move 1d 0a)",
"spiral": "(loop i infinity (move (*d epsilonLength i) (*a epsilonAngle 2)))",
"dashed": "(p (move 1d 0a)) (move 1d 0a)",
"circle": "(move 1d 0a) (loop k 2 (loop i infinity (move epsilonLength epsilonAngle)))",
"lonely circle": "(p (move 1d 0a)) (loop k 2 (loop i infinity (move epsilonLength epsilonAngle)))",
"square dashed": "(p (move 1d 0a)) (loop s 4 (move 1d (/a 1a 4)))",
"square": "(move 1d 0a) (loop s 4 (move 1d (/a 1a 4)))",
"close large semicircle": "(loop i infinity (move (*d epsilonLength 2) epsilonAngle))",
"close semicircle": "(loop i infinity (move epsilonLength epsilonAngle))",
"semicircle": "(move 1d 0a) (loop i infinity (move epsilonLength epsilonAngle))",
"double dashed": "(p (move 1d 0a)) (move 1d 0a) (p (move 1d 0a)) (move 1d 0a)",
"Greek": "(loop i 3 (move (*l 1l i) (/a 1a 4)))"}
for name in body:
if name == "spiral" and n not in [3,5]: continue
if name == "square" and n not in [5,3,6,7]: continue
if name == "semicircle" and n not in [5,3,4,6]: continue
if name == "Greek" and n not in [3,5]: continue
if name == "double dashed" and n not in [6,4,3]: continue
mustTrain = False
mustTrain = mustTrain or (n == 3 and name == "Greek")
mustTrain = mustTrain or (n == 7 and name == "empty")
mustTrain = mustTrain or (n == 5 and name == "dashed")
mustTrain = mustTrain or (n == 7 and name == "circle")
mustTrain | |
"""Test gates defined in `qibo/core/gates.py`."""
import pytest
import numpy as np
from qibo import gates, K
from qibo.config import raise_error
from qibo.tests.utils import random_state, random_density_matrix
def apply_gates(gatelist, nqubits=None, initial_state=None):
if initial_state is None:
state = K.qnp.zeros(2 ** nqubits)
state[0] = 1
elif isinstance(initial_state, np.ndarray):
state = np.copy(initial_state)
if nqubits is None:
nqubits = int(np.log2(len(state)))
else: # pragma: no cover
assert nqubits == int(np.log2(len(state)))
else: # pragma: no cover
raise_error(TypeError, "Invalid initial state type {}."
"".format(type(initial_state)))
state = K.cast(state)
for gate in gatelist:
state = gate(state)
return state
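# Illustrative sketch (not a test): apply_gates folds the gate list over an initial
# state, so a Bell state could be prepared with, e.g.,
#   state = apply_gates([gates.H(0), gates.CNOT(0, 1)], nqubits=2)
# yielding amplitude 1/sqrt(2) on |00> and |11>.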
def test__control_unitary(backend):
matrix = K.cast(np.random.random((2, 2)))
gate = gates.Unitary(matrix, 0)
unitary = gate._control_unitary(matrix)
target_unitary = np.eye(4, dtype=K._dtypes.get('DTYPECPX'))
target_unitary[2:, 2:] = K.to_numpy(matrix)
K.assert_allclose(unitary, target_unitary)
with pytest.raises(ValueError):
unitary = gate._control_unitary(np.random.random((16, 16)))
def test_h(backend):
final_state = apply_gates([gates.H(0), gates.H(1)], nqubits=2)
target_state = np.ones_like(final_state) / 2
K.assert_allclose(final_state, target_state)
def test_x(backend):
final_state = apply_gates([gates.X(0)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_y(backend):
final_state = apply_gates([gates.Y(1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[1] = 1j
K.assert_allclose(final_state, target_state)
def test_z(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.Z(0)], nqubits=2)
target_state = np.ones_like(final_state) / 2.0
target_state[2] *= -1.0
target_state[3] *= -1.0
K.assert_allclose(final_state, target_state)
def test_s(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.S(1)], nqubits=2)
target_state = np.array([0.5, 0.5j, 0.5, 0.5j])
K.assert_allclose(final_state, target_state)
def test_sdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.SDG(1)], nqubits=2)
target_state = np.array([0.5, -0.5j, 0.5, -0.5j])
K.assert_allclose(final_state, target_state)
def test_t(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.T(1)], nqubits=2)
target_state = np.array([0.5, (1 + 1j) / np.sqrt(8),
0.5, (1 + 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_tdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.TDG(1)], nqubits=2)
target_state = np.array([0.5, (1 - 1j) / np.sqrt(8),
0.5, (1 - 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_identity(backend):
gatelist = [gates.H(0), gates.H(1), gates.I(0), gates.I(1)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gatelist = [gates.H(0), gates.H(1), gates.I(0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_align(backend):
gate = gates.Align(0, 1)
gatelist = [gates.H(0), gates.H(1), gate]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gate_matrix = gate._construct_unitary()
K.assert_allclose(gate_matrix, np.eye(4))
# :class:`qibo.core.cgates.M` is tested separately in `test_measurement_gate.py`
def test_rx(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RX(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -1j * phase.imag],
[-1j * phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_ry(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RY(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -phase.imag],
[phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [True, False])
def test_rz(backend, applyx):
theta = 0.1234
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.RZ(0, theta))
final_state = apply_gates(gatelist, nqubits=1)
target_state = np.zeros_like(final_state)
p = int(applyx)
target_state[p] = np.exp((2 * p - 1) * 1j * theta / 2.0)
K.assert_allclose(final_state, target_state)
def test_u1(backend):
theta = 0.1234
final_state = apply_gates([gates.X(0), gates.U1(0, theta)], nqubits=1)
target_state = np.zeros_like(final_state)
target_state[1] = np.exp(1j * theta)
K.assert_allclose(final_state, target_state)
def test_u2(backend):
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U2(0, phi, lam)], initial_state=initial_state)
matrix = np.array([[np.exp(-1j * (phi + lam) / 2), -np.exp(-1j * (phi - lam) / 2)],
[np.exp(1j * (phi - lam) / 2), np.exp(1j * (phi + lam) / 2)]])
target_state = matrix.dot(initial_state) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_u3(backend):
theta = 0.1111
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U3(0, theta, phi, lam)],
initial_state=initial_state)
cost, sint = np.cos(theta / 2), np.sin(theta / 2)
ep = np.exp(1j * (phi + lam) / 2)
em = np.exp(1j * (phi - lam) / 2)
matrix = np.array([[ep.conj() * cost, - em.conj() * sint],
[em * sint, ep * cost]])
target_state = matrix.dot(initial_state)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [False, True])
def test_cnot(backend, applyx):
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.CNOT(0, 1))
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.zeros_like(final_state)
target_state[3 * int(applyx)] = 1.0
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("controlled_by", [False, True])
def test_cz(backend, controlled_by):
initial_state = random_state(2)
matrix = np.eye(4)
matrix[3, 3] = -1
target_state = matrix.dot(initial_state)
if controlled_by:
gate = gates.Z(1).controlled_by(0)
else:
gate = gates.CZ(0, 1)
final_state = apply_gates([gate], initial_state=initial_state)
assert gate.name == "cz"
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("name,params",
[("CRX", {"theta": 0.1}),
("CRY", {"theta": 0.2}),
("CRZ", {"theta": 0.3}),
("CU1", {"theta": 0.1}),
("CU2", {"phi": 0.1, "lam": 0.2}),
("CU3", {"theta": 0.1, "phi": 0.2, "lam": 0.3})])
def test_cun(backend, name, params):
initial_state = random_state(2)
gate = getattr(gates, name)(0, 1, **params)
final_state = apply_gates([gate], initial_state=initial_state)
target_state = np.dot(K.to_numpy(gate.matrix), initial_state)
K.assert_allclose(final_state, target_state)
def test_swap(backend):
final_state = apply_gates([gates.X(1), gates.SWAP(0, 1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_multiple_swap(backend):
gatelist = [gates.X(0), gates.X(2), gates.SWAP(0, 1), gates.SWAP(2, 3)]
final_state = apply_gates(gatelist, nqubits=4)
gatelist = [gates.X(1), gates.X(3)]
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
def test_fsim(backend):
theta = 0.1234
phi = 0.4321
gatelist = [gates.H(0), gates.H(1), gates.fSim(0, 1, theta, phi)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(K.to_numpy(final_state)) / 2.0
rotation = np.array([[np.cos(theta), -1j * np.sin(theta)],
[-1j * np.sin(theta), np.cos(theta)]])
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state = matrix.dot(target_state)
K.assert_allclose(final_state, target_state)
def test_generalized_fsim(backend):
phi = np.random.random()
rotation = np.random.random((2, 2)) + 1j * np.random.random((2, 2))
gatelist = [gates.H(0), gates.H(1), gates.H(2)]
gatelist.append(gates.GeneralizedfSim(1, 2, rotation, phi))
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.ones_like(K.to_numpy(final_state)) / np.sqrt(8)
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state[:4] = matrix.dot(target_state[:4])
target_state[4:] = matrix.dot(target_state[4:])
K.assert_allclose(final_state, target_state)
def test_generalized_fsim_parameter_setter(backend):
phi = np.random.random()
matrix = np.random.random((2, 2))
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
K.assert_allclose(gate.parameters[0], matrix)
assert gate.parameters[1] == phi
matrix = np.random.random((4, 4))
with pytest.raises(ValueError):
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
@pytest.mark.parametrize("applyx", [False, True])
def test_toffoli(backend, applyx):
if applyx:
gatelist = [gates.X(0), gates.X(1), gates.TOFFOLI(0, 1, 2)]
else:
gatelist = [gates.X(1), gates.TOFFOLI(0, 1, 2)]
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.zeros_like(final_state)
if applyx:
target_state[-1] = 1
else:
target_state[2] = 1
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [2, 3])
def test_unitary(backend, nqubits):
initial_state = np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)
matrix = np.random.random(2 * (2 ** (nqubits - 1),))
target_state = np.kron(np.eye(2), matrix).dot(initial_state)
gatelist = [gates.H(i) for i in range(nqubits)]
gatelist.append(gates.Unitary(matrix, *range(1, nqubits), name="random"))
final_state = apply_gates(gatelist, nqubits=nqubits)
K.assert_allclose(final_state, target_state)
def test_unitary_initialization(backend):
matrix = np.random.random((4, 4))
gate = gates.Unitary(matrix, 0, 1)
K.assert_allclose(gate.parameters, matrix)
matrix = np.random.random((8, 8))
with pytest.raises(ValueError):
gate = gates.Unitary(matrix, 0, 1)
with pytest.raises(TypeError):
gate = gates.Unitary("abc", 0, 1)
def test_unitary_common_gates(backend):
target_state = apply_gates([gates.X(0), gates.H(1)], nqubits=2)
gatelist = [gates.Unitary(np.array([[0, 1], [1, 0]]), 0),
gates.Unitary(np.array([[1, 1], [1, -1]]) / np.sqrt(2), 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
thetax = 0.1234
thetay = 0.4321
gatelist = [gates.RX(0, theta=thetax), gates.RY(1, theta=thetay),
gates.CNOT(0, 1)]
target_state = apply_gates(gatelist, nqubits=2)
rx = np.array([[np.cos(thetax / 2), -1j * np.sin(thetax / 2)],
[-1j * np.sin(thetax / 2), np.cos(thetax / 2)]])
ry = np.array([[np.cos(thetay / 2), -np.sin(thetay / 2)],
[np.sin(thetay / 2), np.cos(thetay / 2)]])
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
gatelist = [gates.Unitary(rx, 0), gates.Unitary(ry, 1),
gates.Unitary(cnot, 0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_unitary_multiqubit(backend):
gatelist = [gates.H(i) for i in range(4)]
gatelist.append(gates.CNOT(0, 1))
gatelist.append(gates.CNOT(2, 3))
gatelist.extend(gates.X(i) for i in range(4))
h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
x = np.array([[0, 1], [1, 0]])
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrix = np.kron(np.kron(x, x), np.kron(x, x))
matrix = matrix @ np.kron(cnot, cnot)
matrix = matrix @ np.kron(np.kron(h, h), np.kron(h, h))
unitary = gates.Unitary(matrix, 0, 1, 2, 3)
if K.name == "qibotf":
with pytest.raises(NotImplementedError):
final_state = apply_gates([unitary], nqubits=4)
else:
final_state = apply_gates([unitary], nqubits=4)
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [5, 6])
def test_variational_layer(backend, nqubits):
theta = 2 * np.pi * np.random.random(nqubits)
gatelist = [gates.RY(i, t) for i, t in enumerate(theta)]
gatelist.extend(gates.CZ(i, i + 1) for i in range(0, nqubits - 1, 2))
target_state = apply_gates(gatelist, nqubits=nqubits)
pairs = list((i, i + 1) for i in range(0, nqubits - 1, 2))
gate = gates.VariationalLayer(range(nqubits), pairs,
gates.RY, gates.CZ,
theta)
final_state = apply_gates([gate], nqubits=nqubits)
K.assert_allclose(target_state, final_state)
def test_variational_layer__construct_unitary(backend):
pairs = list((i, i + 1) for i in range(0, 5, 2))
theta = 2 * np.pi * np.random.random(6)
gate = gates.VariationalLayer(range(6), pairs, gates.RY, gates.CZ, theta)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_flatten(backend):
target_state = np.ones(4) / 2.0
final_state = apply_gates([gates.Flatten(target_state)], nqubits=2)
K.assert_allclose(final_state, target_state)
target_state = np.ones(4) / 2.0
gate = gates.Flatten(target_state)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_callback_gate_errors():
from qibo import callbacks
entropy
<filename>deepof/data.py
# @author lucasmiranda42
# encoding: utf-8
# module deepof
"""
Data structures for preprocessing and wrangling of DLC output data.
Project: initial structure for specifying the characteristics of the project.
Coordinates: result of running the project. In charge of calling all relevant
computations for getting the data into the desired shape.
TableDict: python dict subclass for storing experimental instances as pandas.DataFrames.
Contains methods for generating training and test sets ready for model training.
"""
import copy
import os
import warnings
from collections import defaultdict
from typing import Dict
from typing import Tuple, Any, List, NewType
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from joblib import delayed, Parallel, parallel_backend
from pkg_resources import resource_filename
from sklearn import random_projection
from sklearn.decomposition import KernelPCA
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from tqdm import tqdm
import deepof.models
import deepof.pose_utils
import deepof.train_utils
import deepof.utils
import deepof.visuals
# DEFINE CUSTOM ANNOTATED TYPES #
project = NewType("deepof_project", Any)
coordinates = NewType("deepof_coordinates", Any)
table_dict = NewType("deepof_table_dict", Any)
# CLASSES FOR PREPROCESSING AND DATA WRANGLING
class Project:
"""
Class for loading and preprocessing DLC data of individual and multiple animals. All main computations are
handled from here.
"""
def __init__(
self,
arena_dims: int,
animal_ids: List = tuple([""]),
arena: str = "circular",
arena_detection: str = "rule-based",
enable_iterative_imputation: bool = True,
exclude_bodyparts: List = tuple([""]),
exp_conditions: dict = None,
high_fidelity_arena: bool = False,
interpolate_outliers: bool = True,
interpolation_limit: int = 2,
interpolation_std: int = 3,
likelihood_tol: float = 0.85,
model: str = "mouse_topview",
path: str = deepof.utils.os.path.join("."),
smooth_alpha: float = 2,
table_format: str = "autodetect",
frame_rate: int = None,
video_format: str = ".mp4",
):
"""
Initializes a Project object.
Args:
arena_dims (int): diameter of the arena in mm (so far, only round arenas are supported).
animal_ids (list): list of animal ids.
arena (str): arena type. So far, only 'circular' is supported.
arena_detection (str): method for detecting the arena (must be either 'rule-based' (default) or 'cnn').
enable_iterative_imputation (bool): whether to use iterative imputation for occluded body parts. Recommended,
but slow.
exclude_bodyparts (list): list of bodyparts to exclude from analysis.
exp_conditions (dict): dictionary with experiment IDs as keys and experimental conditions as values.
high_fidelity_arena (bool): whether to use high-fidelity arena detection. Recommended if light conditions
are uneven across videos.
interpolate_outliers (bool): whether to interpolate missing data.
interpolation_limit (int): maximum number of missing frames to interpolate.
interpolation_std (int): maximum number of standard deviations to interpolate.
likelihood_tol (float): likelihood threshold for outlier detection.
model (str): model to use for pose estimation. Defaults to 'mouse_topview' (as described in the documentation).
path (str): path to the folder containing the DLC output data.
smooth_alpha (float): smoothing intensity. The higher the value, the more smoothing.
table_format (str): format of the table. Defaults to 'autodetect', but can be set to "csv" or "h5".
frame_rate (int): frame rate of the videos. If not specified, it will be inferred from the video files.
video_format (str): video format. Defaults to '.mp4'.
"""
# Set working paths
self.path = path
self.video_path = os.path.join(self.path, "Videos")
self.table_path = os.path.join(self.path, "Tables")
self.trained_path = resource_filename(__name__, "trained_models")
# Detect files to load from disk
self.table_format = table_format
if self.table_format == "autodetect":
ex = [i for i in os.listdir(self.table_path) if not i.startswith(".")][0]
if ".h5" in ex:
self.table_format = ".h5"
elif ".csv" in ex:
self.table_format = ".csv"
self.videos = sorted(
[
vid
for vid in deepof.utils.os.listdir(self.video_path)
if vid.endswith(video_format) and not vid.startswith(".")
]
)
self.tables = sorted(
[
tab
for tab in deepof.utils.os.listdir(self.table_path)
if tab.endswith(self.table_format) and not tab.startswith(".")
]
)
assert len(self.videos) == len(
self.tables
), "Unequal number of videos and tables. Please check your file structure"
# Loads arena details and (if needed) detection models
self.arena = arena
self.arena_detection = arena_detection
self.arena_dims = arena_dims
self.ellipse_detection = None
if arena == "circular" and arena_detection == "cnn":
self.ellipse_detection = tf.keras.models.load_model(
[
os.path.join(self.trained_path, i)
for i in os.listdir(self.trained_path)
if i.startswith("elliptical")
][0]
)
# Set the rest of the init parameters
self.angles = True
self.animal_ids = animal_ids
self.distances = "all"
self.ego = False
self.exp_conditions = exp_conditions
self.high_fidelity = high_fidelity_arena
self.interpolate_outliers = interpolate_outliers
self.interpolation_limit = interpolation_limit
self.interpolation_std = interpolation_std
self.likelihood_tolerance = likelihood_tol
self.smooth_alpha = smooth_alpha
self.frame_rate = frame_rate
self.video_format = video_format
self.enable_iterative_imputation = enable_iterative_imputation
model_dict = {
"{}mouse_topview".format(aid): deepof.utils.connect_mouse_topview(aid)
for aid in self.animal_ids
}
self.connectivity = {aid: model_dict[aid + model] for aid in self.animal_ids}
# Remove specified body parts from the mice graph
self.exclude_bodyparts = exclude_bodyparts
if len(self.animal_ids) > 1 and self.exclude_bodyparts != tuple([""]):
self.exclude_bodyparts = [
aid + "_" + bp for aid in self.animal_ids for bp in exclude_bodyparts
]
if self.exclude_bodyparts != tuple([""]):
for aid in self.animal_ids:
for bp in self.exclude_bodyparts:
if bp.startswith(aid):
self.connectivity[aid].remove_node(bp)
def __str__(self): # pragma: no cover
if self.exp_conditions:
return "deepof analysis of {} videos across {} condition{}".format(
len(self.videos),
len(set(self.exp_conditions.values())),
("s" if len(set(self.exp_conditions.values())) > 1 else ""),
)
return "deepof analysis of {} videos".format(len(self.videos))
def __repr__(self): # pragma: no cover
if self.exp_conditions:
return "deepof analysis of {} videos across {} condition{}".format(
len(self.videos),
len(set(self.exp_conditions.values())),
("s" if len(set(self.exp_conditions.values())) > 1 else ""),
)
return "deepof analysis of {} videos".format(len(self.videos))
@property
def distances(self):
"""
List. If not 'all', sets the body parts among which the
distances will be computed
"""
return self._distances
@property
def ego(self):
"""
String, name of a body part. If set, computes only the distances
between the specified body part and the rest
"""
return self._ego
@property
def angles(self):
"""
Bool. Toggles angle computation. True by default. If turned off,
enhances performance for big datasets
"""
return self._angles
def get_arena(self, tables) -> Tuple:
"""
Returns the scales, arena parameters and video resolutions recognised from the videos
"""
scales = []
arena_params = []
video_resolution = []
if self.arena in ["circular"]:
for vid_index, _ in enumerate(self.videos):
ellipse, h, w = deepof.utils.recognize_arena(
videos=self.videos,
tables=tables,
vid_index=vid_index,
path=self.video_path,
arena_type=self.arena,
high_fidelity=self.high_fidelity,
detection_mode=self.arena_detection,
cnn_model=self.ellipse_detection,
)
# scales contains the coordinates of the center of the arena,
# the absolute diameter measured from the video in pixels, and
# the provided diameter in mm (1 -default- equals not provided)
scales.append(
list(
np.array(
[
ellipse[0][0],
ellipse[0][1],
np.mean([ellipse[1][0], ellipse[1][1]]) * 2,
]
)
)
+ [self.arena_dims]
)
arena_params.append(ellipse)
video_resolution.append((h, w))
else:
raise NotImplementedError("arenas must be set to one of: 'circular'")
return np.array(scales), arena_params, video_resolution
def load_tables(self, verbose: bool = False) -> deepof.utils.Tuple:
"""
Loads videos and tables into dictionaries.
Args:
verbose (bool): If True, prints the progress of data loading.
Returns:
Tuple: A tuple containing a dictionary with all loaded tables per experiment,
and another dictionary with DLC data quality (per-bodypart likelihoods).
"""
if self.table_format not in [".h5", ".csv"]:
raise NotImplementedError(
"Tracking files must be in either h5 or csv format"
)
if verbose:
print("Loading trajectories...")
tab_dict = {}
if self.table_format == ".h5":
tab_dict = {
deepof.utils.re.findall("(.*)DLC", tab)[0]: pd.read_hdf(
deepof.utils.os.path.join(self.table_path, tab), dtype=float
)
for tab in self.tables
}
elif self.table_format == ".csv":
tab_dict = {
deepof.utils.re.findall("(.*)DLC", tab)[0]: pd.read_csv(
deepof.utils.os.path.join(self.table_path, tab),
header=[0, 1, 2],
index_col=0,
dtype=float,
)
for tab in self.tables
}
# Pass a time-based index, if specified in init
if self.frame_rate is not None:
for key, tab in tab_dict.items():
tab_dict[key].index = pd.timedelta_range(
"00:00:00",
pd.to_timedelta((tab.shape[0] // self.frame_rate), unit="sec"),
periods=tab.shape[0] + 1,
closed="left",
).map(lambda t: str(t)[7:])
lik_dict = defaultdict()
for key, value in tab_dict.items():
x = value.xs("x", level="coords", axis=1, drop_level=False)
y = value.xs("y", level="coords", axis=1, drop_level=False)
lik = value.xs("likelihood", level="coords", axis=1, drop_level=True)
tab_dict[key] = pd.concat([x, y], axis=1).sort_index(axis=1)
lik_dict[key] = lik.droplevel("scorer", axis=1)
if self.smooth_alpha:
if verbose:
print("Smoothing trajectories...")
for key, tab in tab_dict.items():
cur_idx = tab.index
cur_cols = tab.columns
smooth = pd.DataFrame(
deepof.utils.smooth_mult_trajectory(
np.array(tab),
alpha=self.smooth_alpha,
w_length=15,
)
).reset_index(drop=True)
smooth.columns = cur_cols
smooth.index = cur_idx
tab_dict[key] = smooth
# Remove scorer header
for key, tab in tab_dict.items():
tab_dict[key] = tab.loc[:, tab.columns.levels[0][0]]
if self.exclude_bodyparts != tuple([""]):
for k, value in tab_dict.items():
temp = value.drop(self.exclude_bodyparts, axis=1, level="bodyparts")
temp.sort_index(axis=1, inplace=True)
temp.columns = pd.MultiIndex.from_product(
[sorted(list(set([i[j] for i in temp.columns]))) for j in range(2)]
)
tab_dict[k] = temp.sort_index(axis=1)
if self.interpolate_outliers:
if verbose:
print("Interpolating outliers...")
for k, value in tab_dict.items():
tab_dict[k] = deepof.utils.interpolate_outliers(
value,
lik_dict[k],
likelihood_tolerance=self.likelihood_tolerance,
mode="or",
limit=self.interpolation_limit,
n_std=self.interpolation_std,
)
if self.enable_iterative_imputation:
if verbose:
print("Iterative imputation of ocluded bodyparts...")
for k, value in
<reponame>ZhuangLab/Chromatin_Analysis_2020_cell
import sys
import glob
import os
import time
import copy
import numpy as np
import pickle as pickle
import multiprocessing as mp
# saving
import h5py
import ast
# plotting
import matplotlib
import matplotlib.pyplot as plt
# import other sub-packages
# import package parameters
from .. import _correction_folder, _corr_channels, _temp_folder,_distance_zxy,\
_sigma_zxy,_image_size, _allowed_colors, _num_buffer_frames, _num_empty_frames, _image_dtype
from . import _allowed_kwds, _max_num_seeds, _min_num_seeds, _spot_seeding_th
def __init__():
print(f"Loading field of view class")
pass
class Field_of_View():
"""Class of field-of-view of a certain sample, which includes all possible files across hybs and parameters"""
def __init__(self, parameters,
_fov_id=None, _fov_name=None,
_load_references=True, _color_info_kwargs={},
_create_savefile=True, _save_filename=None,
_savefile_kwargs={},
_segmentation_kwargs={},
_load_all_attrs=True,
_overwrite_attrs=False,
_verbose=True,
):
## Initialize key attributes:
#: attributes for unprocessed images:
# correction profiles
self.correction_profiles = {'bleed':None,
'chromatic':None,
'illumination':None,}
# drifts
self.drift = {}
# rotations
self.rotation = {}
# segmentation
if 'segmentation_dim' not in _segmentation_kwargs:
self.segmentation_dim = 2 # default is 2d segmentation
else:
self.segmentation_dim = int(_segmentation_kwargs['segmentation_dim'])
#: attributes for processed images:
# splitted processed images
self.im_dict = {}
# channel dict corresponding to im_dict
self.channel_dict = {}
## check input datatype
if not isinstance(parameters, dict):
raise TypeError(f'wrong input type of parameters, should be dict containing essential info, but {type(parameters)} is given!')
## required parameters:
# data_folder: str of folder or list of str of folders
if 'data_folder' not in parameters:
raise KeyError(f"data_folder is required in parameters.")
if isinstance(parameters['data_folder'], list):
self.data_folder = [str(_fd) for _fd in parameters['data_folder']]
else:
self.data_folder = [str(parameters['data_folder'])]
## extract hybe folders and field-of-view names
self.folders = []
for _fd in self.data_folder:
from ..get_img_info import get_folders
_hyb_fds, _fovs = get_folders(_fd, feature='H', verbose=True)
self.folders += _hyb_fds # here only extract folders not fovs
if _fov_name is None and _fov_id is None:
raise ValueError(f"either _fov_name or _fov_id should be given!")
elif _fov_id is not None:
_fov_id = int(_fov_id)
# define fov_name
_fov_name = _fovs[_fov_id]
else:
_fov_name = str(_fov_name)
if _fov_name not in _fovs:
raise ValueError(f"_fov_name:{_fov_name} should be within fovs:{_fovs}")
_fov_id = _fovs.index(_fov_name)
# append fov information
self.fov_id = _fov_id
self.fov_name = _fov_name
# experiment_folder
if 'experiment_folder' in parameters:
self.experiment_folder = parameters['experiment_folder']
else:
self.experiment_folder = os.path.join(self.data_folder[0], 'Experiment')
## analysis_folder, segmentation_folder, save_folder, correction_folder,map_folder
if 'analysis_folder' in parameters:
self.analysis_folder = str(parameters['analysis_folder'])
else:
self.analysis_folder = os.path.join(self.data_folder[0], 'Analysis')
if 'segmentation_folder' in parameters:
self.segmentation_folder = parameters['segmentation_folder']
else:
self.segmentation_folder = os.path.join(self.analysis_folder, 'segmentation')
# save folder
if 'save_folder' in parameters:
self.save_folder = parameters['save_folder']
else:
self.save_folder = os.path.join(self.analysis_folder,'save')
if 'correction_folder' in parameters:
self.correction_folder = parameters['correction_folder']
else:
self.correction_folder = _correction_folder
if 'drift_folder' in parameters:
self.drift_folder = parameters['drift_folder']
else:
self.drift_folder = os.path.join(self.analysis_folder, 'drift')
if 'map_folder' in parameters:
self.map_folder = parameters['map_folder']
else:
self.map_folder = os.path.join(self.analysis_folder, 'distmap')
# number of num_threads
if 'num_threads' in parameters:
self.num_threads = parameters['num_threads']
else:
self.num_threads = int(os.cpu_count() / 4) # default: use a quarter of the cpus.
# ref_id
if 'ref_id' in parameters:
self.ref_id = int(parameters['ref_id'])
else:
self.ref_id = 0
## shared_parameters
# initialize
if 'shared_parameters' in parameters:
self.shared_parameters = parameters['shared_parameters']
else:
self.shared_parameters = {}
# add parameter keys:
if 'image_dtype' not in self.shared_parameters:
self.shared_parameters['image_dtype'] = _image_dtype
if 'distance_zxy' not in self.shared_parameters:
self.shared_parameters['distance_zxy'] = _distance_zxy
if 'sigma_zxy' not in self.shared_parameters:
self.shared_parameters['sigma_zxy'] = _sigma_zxy
if 'single_im_size' not in self.shared_parameters:
self.shared_parameters['single_im_size'] = _image_size
if 'num_buffer_frames' not in self.shared_parameters:
self.shared_parameters['num_buffer_frames'] = _num_buffer_frames
if 'num_empty_frames' not in self.shared_parameters:
self.shared_parameters['num_empty_frames'] = _num_empty_frames
if 'normalization' not in self.shared_parameters:
self.shared_parameters['normalization'] = False
if 'corr_channels' not in self.shared_parameters:
self.shared_parameters['corr_channels'] = _corr_channels
if 'corr_bleed' not in self.shared_parameters:
self.shared_parameters['corr_bleed'] = True
if 'corr_Z_shift' not in self.shared_parameters:
self.shared_parameters['corr_Z_shift'] = True
if 'corr_hot_pixel' not in self.shared_parameters:
self.shared_parameters['corr_hot_pixel'] = True
if 'corr_illumination' not in self.shared_parameters:
self.shared_parameters['corr_illumination'] = True
if 'corr_chromatic' not in self.shared_parameters:
self.shared_parameters['corr_chromatic'] = True
if 'allowed_data_types' not in self.shared_parameters:
self.shared_parameters['allowed_data_types'] = _allowed_kwds
# params for drift
if 'max_num_seeds' not in self.shared_parameters:
self.shared_parameters['max_num_seeds'] = _max_num_seeds
if 'min_num_seeds' not in self.shared_parameters:
self.shared_parameters['min_num_seeds'] = _min_num_seeds
if 'drift_size' not in self.shared_parameters:
self.shared_parameters['drift_size'] = 600
if 'drift_use_fft' not in self.shared_parameters:
self.shared_parameters['drift_use_fft'] = True
if 'drift_sequential' not in self.shared_parameters:
self.shared_parameters['drift_sequential'] = False
if 'good_drift_th' not in self.shared_parameters:
self.shared_parameters['good_drift_th'] = 1.
# param for spot_finding
if 'spot_seeding_th' not in self.shared_parameters:
self.shared_parameters['spot_seeding_th'] = _spot_seeding_th
if 'normalize_intensity_local' not in self.shared_parameters:
self.shared_parameters['normalize_intensity_local'] = True
## load experimental info
if _load_references:
if '_color_filename' not in _color_info_kwargs:
self.color_filename = 'Color_Usage'
_color_info_kwargs['_color_filename'] = self.color_filename
else:
self.color_filename = _color_info_kwargs['_color_filename']
if '_color_format' not in _color_info_kwargs:
self.color_format = 'csv'
_color_info_kwargs['_color_format'] = self.color_format
else:
self.color_format = _color_info_kwargs['_color_format']
_color_dic = self._load_color_info(_annotate_folders=True, **_color_info_kwargs)
## Drift
# update ref_filename
self.ref_filename = os.path.join(self.annotated_folders[self.ref_id], self.fov_name)
# update drift filename
_dft_fl_postfix = '_current_cor.pkl'
if self.shared_parameters['drift_sequential']:
_dft_fl_postfix = '_sequential'+_dft_fl_postfix
self.drift_filename = os.path.join(self.drift_folder,
self.fov_name.replace('.dax', _dft_fl_postfix))
# generate drift crops
from ..correction_tools.alignment import generate_drift_crops
self.drift_crops = generate_drift_crops(
drift_size=self.shared_parameters['drift_size'],
single_im_size=self.shared_parameters['single_im_size'],
)
## Create savefile
# save filename
if _save_filename is None:
_save_filename = os.path.join(self.save_folder, self.fov_name.replace('.dax', '.hdf5'))
# set save_filename attr
self.save_filename = _save_filename
# initialize save file
if _create_savefile:
self._init_save_file(_save_filename=_save_filename,
_overwrite=_overwrite_attrs,
**_savefile_kwargs)
## Load basic info
def _load_color_info(self, _color_filename=None, _color_format=None,
_save_color_dic=True, _annotate_folders=False):
"""Function to load color usage representing experimental info"""
## check inputs
if _color_filename is None:
_color_filename = self.color_filename
if _color_format is None:
_color_format = self.color_format
from ..get_img_info import Load_Color_Usage, find_bead_channel, find_dapi_channel
_color_dic, _use_dapi, _channels = Load_Color_Usage(self.analysis_folder,
color_filename=_color_filename,
color_format=_color_format,
return_color=True)
# need-based store color_dic
if _save_color_dic:
self.color_dic = _color_dic
# store other info
self.use_dapi = _use_dapi
self.channels = [str(ch) for ch in _channels]
# channel for beads
_bead_channel = find_bead_channel(_color_dic)
self.bead_channel_index = _bead_channel
_dapi_channel = find_dapi_channel(_color_dic)
self.dapi_channel_index = _dapi_channel
# get annotated folders by color usage
if _annotate_folders:
self.annotated_folders = []
for _hyb_fd, _info in self.color_dic.items():
_matches = [_fd for _fd in self.folders if _hyb_fd == _fd.split(os.sep)[-1]]
if len(_matches)==1:
self.annotated_folders.append(_matches[0])
print(f"- {len(self.annotated_folders)} folders are found according to color-usage annotation.")
return _color_dic
### Here are some initialization functions
def _init_save_file(self, _save_filename=None,
_overwrite=False, _verbose=True):
"""Function to initialize save file for FOV object
Inputs:
_save_filename: full path for filename saving this dataset.
_overwrite: whether overwrite existing info within save_file, bool (default: False)
_verbose: say something!, bool (default: True)
Outputs:
save_file created, current info saved.
"""
if _save_filename is None:
_save_filename = getattr(self, 'save_filename')
# set save_filename attr
setattr(self, 'save_filename', _save_filename)
if _verbose:
if not os.path.exists(_save_filename):
print(f"- Creating save file for fov:{self.fov_name}: {_save_filename}.")
else:
print(f"- Initialize save file for fov:{self.fov_name}: {_save_filename}.")
## initialize fov_info, segmentation and correction
for _type in ['fov_info', 'segmentation', 'correction']:
self._save_to_file(_type, _overwrite=_overwrite, _verbose=_verbose)
## initialize image data types
from .batch_functions import _color_dic_stat
# acquire valid types
_type_dic = _color_dic_stat(self.color_dic,
self.channels,
self.shared_parameters['allowed_data_types']
)
# create
for _type, _dict in _type_dic.items():
self._save_to_file(_type, _overwrite=_overwrite, _verbose=_verbose)
return
def _old_init_save_file(self, _save_filename=None,
_overwrite=False, _verbose=True):
"""Function to initialize save file for FOV object"""
if _save_filename is None:
_save_filename = getattr(self, 'save_filename')
# set save_filename attr
setattr(self, 'save_filename', _save_filename)
if _verbose and not os.path.exists(_save_filename):
print(f"- Creating save file for fov:{self.fov_name}: {_save_filename}")
with h5py.File(_save_filename, "a", libver='latest') as _f:
if _verbose:
print(f"- Updating info for fov:{self.fov_name}: {_save_filename}")
## self specific attributes stored directly in attributes:
_base_attrs = []
for _attr_name in dir(self):
# exclude all default attrs and functions
if _attr_name[0] != '_' and getattr(self, _attr_name) is not None:
# set default to be save
_info_attr_flag = True
# if included into data_type, not save here
for _name in self.shared_parameters['allowed_data_types'].keys():
# give some criteria
if _name in _attr_name:
_info_attr_flag = False
break
# if its image dict, exclude
if 'im_dict' in _attr_name or 'channel_dict' in _attr_name:
_info_attr_flag = False
# if its segmentation, exclude
if 'segmentation' in _attr_name:
_info_attr_flag = False
# if its related to correction, exclude
if 'correction' in _attr_name:
_info_attr_flag = False
## all the rest attrs saved to here:
# save here:
if _info_attr_flag:
# extract the attribute
_attr = getattr(self, _attr_name)
# convert dict if necessary
if isinstance(_attr, dict):
_attr = str(_attr)
# save
if _attr_name not in _f.attrs or _overwrite:
_f.attrs[_attr_name] = _attr
_base_attrs.append(_attr_name)
if _verbose:
print(f"-- base attributes updated:{_base_attrs}")
## segmentation
if 'segmentation' not in _f.keys():
_grp = _f.create_group('segmentation') # create segmentation group
else:
_grp = _f['segmentation']
# directly create segmentation label dataset
if 'segmentation_label' not in _grp:
_seg
"""Script that visualizes dependencies of Nix packages"""
import argparse
import configparser
import itertools
import os
import random
import shlex
import subprocess
import sys
import tempfile
import logging
import networkx as nx
import pygraphviz as pgv
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from . import util
from .graph_objects import Node, Edge
logger = logging.getLogger(__name__)
#: Default values for things we expect in the config file
CONFIG_OPTIONS = {
"aspect_ratio": (2, float),
"dpi": (300, int),
"font_scale": (1.0, float),
"color_scatter": (1.0, float),
"edge_color": ("#888888", str),
"font_color": ("#888888", str),
"color_map": ("rainbow", str),
"img_y_height_inches": (24, float),
"y_sublevels": (5, int),
"y_sublevel_spacing": (0.2, float),
"num_iterations": (100, int),
"edge_alpha": (0.3, float),
"edge_width_scale": (1.0, float),
"max_displacement": (2.5, float),
"top_level_spacing": (100, float),
"repulsive_force_normalization": (2.0, float),
"attractive_force_normalization": (1.0, float),
"add_size_per_out_link": (200, int),
"max_node_size_over_min_node_size": (5.0, float),
"min_node_size": (100.0, float),
"tmax": (30.0, float),
"show_labels": (1, int)
}
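# A hypothetical config file section read by Graph._parse_config below; any
# option left out falls back to the defaults in CONFIG_OPTIONS:
#   [mytree]
#   dpi = 150
#   color_map = viridis
#   aspect_ratio = 2
#   show_labels = 0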
class Graph(object):
"""Class representing a dependency tree"""
def __init__(self, packages, config, output_file, do_write=True):
"""Initialize a graph from the result of a nix-store command"""
self.config = self._parse_config(config)
self.nodes = []
self.edges = []
self.root_package_names = [util.remove_nix_hash(os.path.basename(x)) for
x in packages]
for package in packages:
# Run nix-store -q --graph <package>. This generates a graphviz
# file with package dependencies
cmd = ("nix-store -q --graph {}".format(package))
res = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = res.communicate()
if res.returncode != 0:
raise util.TreeCLIError("nix-store call failed, message "
"{}".format(stderr))
package_nodes, package_edges = self._get_edges_and_nodes(stdout)
self.nodes.extend(package_nodes)
self.edges.extend(package_edges)
self.nodes = list(set(self.nodes))
self._add_edges_to_nodes()
# The package itself is level 0, its direct dependencies are
# level 1, their direct dependencies are level 2, etc.
for n in self.nodes:
n.add_level()
self.depth = max([x.level for x in self.nodes]) + 1
logger.info("Graph has {} nodes, {} edges and a depth of {}".format(
len(self.nodes), len(self.edges), self.depth))
# Transform the Nodes and Edges into a networkx graph
self.G = nx.DiGraph()
for node in self.nodes:
self.G.add_node(node)
for parent in node.parents:
self.G.add_edge(node, parent)
self._add_pos_to_nodes()
if do_write is True:
self.write_frame_png(filename=output_file)
def _parse_config(self, config, verbose=True):
"""Load visualization parameters from config file or take defaults
if they are not in there
"""
configfile = config[0]
configsection = config[1]
return_configs = {}
if configfile is not None:
configs = configparser.ConfigParser()
configs.read(configfile)
if len(configs.sections()) > 1:
if configsection is None:
raise util.TreeCLIError("Config file {} contains more than "
"one section, so -s must be set".format(
configfile))
elif configsection not in configs.sections():
raise util.TreeCLIError("Config file {} does not contain a "
"section named {}".format(
configfile, configsection))
else:
# There is only one section in the file, just read it
configsection = configs.sections()[0]
else:
logger.info("--configfile not set, using all defaults")
return {k: v[0] for k, v in CONFIG_OPTIONS.items()}
logger.info("Reading section [{}] of file {}".format(configsection,
configfile))
# Loop through config options. If there is a corresponding key in the
# config file, overwrite, else take the value from the defaults
for param, (p_default, p_dtype) in CONFIG_OPTIONS.items():
try:
return_configs[param] = p_dtype(
configs.get(configsection, param))
logger.debug("Setting {} to {}".format(param,
return_configs[param]))
except (configparser.NoOptionError, ValueError):
return_configs[param] = p_dtype(p_default)
logger.info( "Adding default of {} for {}".format(
p_dtype(p_default), param))
return return_configs
def write_frame_png(self, filename="nix-tree.png"):
"""Dump the graph to a png file"""
try:
cmap = getattr(matplotlib.cm, self.config["color_map"])
except AttributeError:
raise util.TreeCLIError("Colormap {} does not exist".format(
self.config["color_map"]))
pos = {n: (n.x, n.y) for n in self.nodes}
col_scale = 255.0/(self.depth+1.0)
col = [(x.level+random.random()*self.config["color_scatter"])*col_scale
for x in self.G.nodes()]
col = [min([x,255]) for x in col]
img_y_height=self.config["img_y_height_inches"]
size_min = self.config["min_node_size"]
size_max = self.config["max_node_size_over_min_node_size"] * size_min
plt.figure(1, figsize=(img_y_height*self.config["aspect_ratio"],
img_y_height))
node_size = [min(size_min + (x.out_degree-1)*
self.config["add_size_per_out_link"],
size_max) if x.level > 0 else size_max for
x in self.G.nodes()]
# Draw edges
nx.draw(self.G, pos, node_size=node_size, arrows=False,
with_labels=self.config["show_labels"],
edge_color=self.config["edge_color"],
font_size=12*self.config["font_scale"],
node_color=col, vmin=0, vmax=256,
width=self.config["edge_width_scale"],
alpha=self.config["edge_alpha"], nodelist=[])
# Draw nodes
nx.draw(self.G, pos, node_size=node_size, arrows=False,
with_labels=self.config["show_labels"],
font_size=12*self.config["font_scale"],
node_color=col, vmin=0, vmax=255, edgelist=[],
font_weight="light", cmap=cmap,
font_color=self.config["font_color"])
logger.info("Writing png file: {}".format(filename))
plt.savefig(filename, dpi=self.config["dpi"])
plt.close()
def _add_pos_to_nodes(self):
"""Populates every node with an x an y position using the following
iterative algorithm:
* start at t=0
* Apply an x force to each node that is proportional to the offset
between its x position and the average position of its parents
* Apply an x force to each node that pushes it away from its siblings
with a force proportional to 1/d, where d is the distance between
the node and its neighbor
* advance time forward by dt=tmax/num_iterations, displace particles
by F*dt
* repeat until the number of iterations has been exhausted
"""
logger.info("Adding positions to nodes")
#: The distance between levels in arbitrary units. Used to set a
#: scale on the diagram
level_height = 10
#: Maximum displacement of a point on a single iteration
max_displacement = level_height * self.config["max_displacement"]
#: The timestep to take on each iteration
dt = self.config["tmax"]/self.config["num_iterations"]
number_top_level = len([x for x in self.nodes if x.level == 0])
count_top_level = 0
# Initialize x with a random position unless you're the top level
# package, then space nodes evenly
for n in self.nodes:
if n.level == 0:
n.x = float(count_top_level)*self.config["top_level_spacing"]
count_top_level += 1
n.y = self.depth * level_height
else:
n.x = ((number_top_level + 1) *
self.config["top_level_spacing"] * random.random())
for iternum in range(self.config["num_iterations"]):
if iternum in range(0,self.config["num_iterations"],
int(self.config["num_iterations"]/10)):
logger.debug("Completed iteration {} of {}".format(iternum,
self.config["num_iterations"]))
total_abs_displacement = 0.0
for level in range(1, self.depth):
# Get the y-offset by cycling with other nodes in the
# same level
xpos = [(x.name, x.x) for x in self.level(level)]
xpos = sorted(xpos, key=lambda x:x[1])
xpos = zip(xpos,
itertools.cycle(range(self.config["y_sublevels"])))
pos_sorter = {x[0][0]: x[1] for x in xpos}
for n in self.level(level):
n.y = ((self.depth - n.level) * level_height +
pos_sorter[n.name] *
self.config["y_sublevel_spacing"]*level_height)
for lev_node in self.level(level):
# We pull nodes toward their parents
dis = [parent.x - lev_node.x for
parent in lev_node.parents]
# And push nodes away from their siblings with force 1/r
sibs = self.level(level)
sdis = [1.0/(sib.x - lev_node.x) for
sib in sibs if abs(sib.x-lev_node.x) > 1e-3]
total_sdis = (
sum(sdis) *
self.config["repulsive_force_normalization"])
total_displacement = (
self.config["attractive_force_normalization"] *
float(sum(dis)) / len(dis))
# Limit each of the displacements to the max displacement
dx_parent = util.clamp(total_displacement, max_displacement)
lev_node.dx_parent = dx_parent
dx_sibling = util.clamp(total_sdis, max_displacement)
lev_node.dx_sibling = -dx_sibling
for lev_node in self.level(level):
lev_node.x += lev_node.dx_parent * dt
lev_node.x += lev_node.dx_sibling * dt
total_abs_displacement += (abs(lev_node.dx_parent * dt) +
abs(lev_node.dx_sibling * dt))
def level(self, level):
"""Return a list of all nodes on a given level
"""
return [x for x in self.nodes if x.level == level]
def levels(self, min_level=0):
"""An iterator over levels, yields all the nodes in each level"""
for i in range(min_level,self.depth):
yield self.level(i)
def nodes_by_prefix(self, name):
"""Return a list of all nodes whose names begin with a given prefix
"""
return [x for x in self.nodes if x.name.startswith(name)]
def _get_edges_and_nodes(self, raw_lines):
"""Transform a raw GraphViz file into Node and Edge objects. Note
that at this point the nodes and edges are not linked into a graph;
they are simply two lists of items."""
tempf = tempfile.NamedTemporaryFile(delete=False)
tempf.write(raw_lines)
tempf.close()
G = pgv.AGraph(tempf.name)
all_edges = []
all_nodes = []
for node in G.nodes():
if (util.remove_nix_hash(node.name) not
in [n.name for n in all_nodes]):
all_nodes.append(Node(node.name))
for edge in G.edges():
all_edges.append(Edge(edge[0], edge[1]))
return all_nodes, all_edges
def _add_edges_to_nodes(self):
"""Given the lists of Edges and Nodes, add parents and children to
nodes by following each edge
"""
for edge in self.edges:
nfrom = [n for n in self.nodes if n.name == edge.nfrom]
nto = [n for n in self.nodes if n.name == edge.nto]
nfrom = nfrom[0]
nto = nto[0]
if nfrom.name == nto.name:
# Disallow self-references
continue
if nto not in nfrom.parents:
nfrom.add_parent(nfrom, nto)
if nfrom not in nto.children:
nto.add_child(nfrom, nto)
def __repr__(self):
"""Basic print of Graph, show the package name and the number of
dependencies on each level
"""
head = self.level(0)
ret_str = "Graph of package: {}".format(head[0].name)
for ilevel, level in enumerate(self.levels(min_level=1)):
ret_str += "\n\tOn level {} there are {} packages".format(
ilevel+1, len(level))
return ret_str
def init_logger(debug=False):
"""Sets up logging for this cli"""
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format="%(levelname)s %(message)s\033[1;0m",
stream=sys.stderr, level=log_level)
logging.addLevelName(logging.CRITICAL,
"\033[1;37m[\033[1;31mCRIT\033[1;37m]\033[0;31m")
logging.addLevelName(logging.ERROR,
"\033[1;37m[\033[1;33mERR \033[1;37m]\033[0;33m")
logging.addLevelName(logging.WARNING,
"\033[1;37m[\033[1;33mWARN\033[1;37m]\033[0;33m")
logging.addLevelName(logging.INFO,
"\033[1;37m[\033[1;32mINFO\033[1;37m]\033[0;37m")
logging.addLevelName(logging.DEBUG,
"\033[1;37m[\033[1;34mDBUG\033[1;37m]\033[0;34m")
def main():
"""Parse command line arguments, instantiate graph and dump image"""
parser = argparse.ArgumentParser()
parser.add_argument("packages",
<filename>mevis/_internal/conversion.py
from collections.abc import Callable as _Callable
import networkx as _nx
from opencog.type_constructors import AtomSpace as _AtomSpace
from .args import check_arg as _check_arg
def convert(data, graph_annotated=True, graph_directed=True,
node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None,
node_border_color=None, node_border_size=None,
node_label_color=None, node_label_size=None, node_hover=None, node_click=None,
node_image=None, node_properties=None,
edge_label=None, edge_color=None, edge_opacity=None, edge_size=None,
edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None):
"""Convert an Atomspace or list of Atoms to a NetworkX graph with annotations.
Several arguments accept a Callable.
- In case of node annotations, the Callable gets an Atom as input,
which the node represents in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
- In case of edge annotations, the Callable gets two Atoms as input,
which the edge connects in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
Several arguments accept a color, which can be in following formats:
- Name: ``"black"``, ``"red"``, ``"green"``, ...
- Color code
- 6 digit hex RGB code: ``"#05ac05"``
- 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``)
Parameters
----------
data : Atomspace, list of Atoms
Input that gets converted to a graph.
graph_annotated : bool
If ``False``, no annotations are added to the graph. This could be used for
converting large AtomSpaces quickly to graphs that use less RAM and can
be exported to smaller files (e.g. also compressed as gml.gz) for inspection
with other tools.
graph_directed : bool
If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created.
node_label : str, Callable
Set a label for each node, which is shown as text below it.
node_color : str, Callable
Set a color for each node, which becomes the fill color of its shape.
node_opacity : float between 0.0 and 1.0
Set an opacity for each node, which becomes the opacity of its shape.
Caution: This is only supported by d3.
node_size : int, float, Callable
Set a size for each node, which becomes the height and width of its shape.
node_shape : str, Callable
Set a shape for each node, which is some geometrical form that has the
node coordinates in its center.
Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"``
node_border_color : str, Callable
Set a border color for each node, which influences the border drawn around its shape.
node_border_size : int, float, Callable
Set a border size for each node, which influences the border drawn around its shape.
node_label_color : str, Callable
Set a label color for each node, which determines the font color
of the text below the node.
node_label_size : int, float, Callable
Set a label size for each node, which determines the font size
of the text below the node.
node_hover : str, Callable
Set a hover text for each node, which shows up besides the mouse cursor
when hovering over a node.
node_click : str, Callable
Set a click text for each node, which shows up in a div element below the plot
when clicking on a node and can easily be copied and pasted.
node_image : str, Callable
Set an image for each node, which appears within its shape.
Possible values:
- URL pointing to an image
- Data URL encoding the image
node_properties : str, dict, Callable
Set additional properties for each node, which may not immediately be translated
into a visual element, but can be chosen in the data selection menu in the
interactive HTML visualizations to map them on some plot element.
These properties also appear when exporting a graph to a file in a format
such as GML and may be recognized by external visualization tools.
Note that a Callable needs to return a dict in this case, and each key becomes
a property, which is equivalent to the other properties such as node_size and
node_color.
Special cases:
- ``node_properties="tv"`` is a shortcut for using a function that returns
``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}``
- Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates.
Examples:
- ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the
JavaScript layout algorithm does not influence it, but the nodes remain
free to move in the y and z directions.
- ``lambda atom: dict(x=2.0) if atom.is_node() else None``:
This fixes the x coordinate of each Atom of type Node to 2.0
but allows each Atom of type Link to move freely.
- ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)``
This fixes the y coordinates of Atoms at different heights. Atoms of type Node
are put at the bottom and Atoms of type Link are ordered by the number of their
outgoing edges. The results is a hierarchical visualization that has some
similarity with the "dot" layout.
- ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``:
This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100.
The results is a visualization with two lines of nodes that has some
similarity with the "bipartite" layout.
edge_label : str, Callable
Set a label for each edge, which becomes the text plotted in the middle of the edge.
edge_color : str, Callable
Set a color for each edge, which becomes the color of the line representing the edge.
edge_opacity : int, float, Callable
Set an opacity for each edge, which allows to make it transparent to some degree.
edge_size : int, float, Callable
Set a size for each edge, which becomes the width of the line representing the edge.
edge_label_color : str, Callable
Set a color for each edge label, which becomes the color of the text in the midpoint
of the edge.
edge_label_size : int, float, Callable
Set a size for each edge label, which becomes the size of the text in the midpoint
of the edge.
edge_hover : str, Callable
Set a hover text for each edge, which shows up besides the mouse cursor
when hovering over an edge.
edge_click : str, Callable
Set a click text for each edge, which shows up in a div element below the plot
when clicking on an edge and can easily be copied and pasted.
Returns
-------
graph : NetworkX Graph or DiGraph
Whether an undirected or directed graph is created depends on the argument "directed".
"""
# Argument processing
_check_arg(data, 'data', (list, _AtomSpace))
_check_arg(graph_annotated, 'graph_annotated', bool)
_check_arg(graph_directed, 'graph_directed', bool)
_check_arg(node_label, 'node_label', (str, _Callable), allow_none=True)
_check_arg(node_color, 'node_color', (str, _Callable), allow_none=True)
_check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True)
_check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True)
_check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True)
_check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True)
_check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True)
_check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True)
_check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True)
_check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True)
_check_arg(node_click, 'node_click', (str, _Callable), allow_none=True)
_check_arg(node_image, 'node_image', (str, _Callable), allow_none=True)
_check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True)
_check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True)
_check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True)
_check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True)
_check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True)
_check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True)
_check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True)
# Prepare annoation functions
if graph_annotated:
node_ann = prepare_node_func(
node_label, node_color, node_opacity, node_size, node_shape, node_border_color,
node_border_size, node_label_color, node_label_size, node_hover, node_click,
node_image, node_properties)
edge_ann = prepare_edge_func(
edge_label, edge_color, edge_opacity, edge_size,
edge_label_color, edge_label_size, edge_hover, edge_click)
else:
empty = dict()
def node_ann(atom):
return empty
def edge_ann(atom1, atom2):
return empty
# Create the NetworkX graph
graph = _nx.DiGraph() if graph_directed else _nx.Graph()
# 0) Set graph annotations
graph.graph['node_click'] = '$hover' # node_click will by default show content of node_hover
# 1) Add vertices and their annotations
for atom in data:
graph.add_node(to_uid(atom), **node_ann(atom))
# 2) Add edges and their annotations (separate step to exclude edges to filtered vertices)
for atom in data:
uid = to_uid(atom)
if atom.is_link():
# for all that is incoming to the Atom
for atom2 in atom.incoming:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid2, uid, **edge_ann(atom2, atom))
# for all that is outgoing of the Atom
for atom2 in atom.out:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid, uid2, **edge_ann(atom, atom2))
return graph
def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape,
node_border_color, node_border_size, node_label_color, node_label_size,
node_hover, node_click, node_image,
<reponame>jrhartog/aqms-ir
"""
SQLAlchemy ORM classes that describe the database tables
"""
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import text
from sqlalchemy import Column, DateTime, Integer, Numeric, String, ForeignKey
from sqlalchemy import Sequence
# create the base class of all ORM classes
Base = declarative_base()
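# A minimal usage sketch (the connection URL is hypothetical; create_engine,
# sessionmaker and Base.metadata.create_all are standard SQLAlchemy calls):
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine("postgresql://user:password@localhost/archdb")
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()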
class Abbreviation(Base):
__tablename__ = "d_abbreviation"
id = Column('id', Integer, Sequence('abbseq'), primary_key=True, nullable=False)
description = Column('description', String)
def __repr__(self):
return "Abbreviation: id={}, description={}".\
format(self.id, self.description)
class Unit(Base):
__tablename__ = "d_unit"
id = Column('id', Integer, Sequence('uniseq'), primary_key=True, nullable=False)
name = Column('name', String(80))
description = Column('description', String(70))
def __repr__(self):
return "Unit: id={}, name={}, description={}".format(\
self.id, self.name, self.description)
class Format(Base):
__tablename__ = "d_format"
id = Column('id', Integer, Sequence('forseq'), primary_key=True, nullable=False)
name = Column('name', String(80), default="UNKNOWN")
family = Column('family', Integer, nullable=False, default=50)
ms_id = Column('ms_id', Integer, nullable=False, default=0)
def __repr__(self):
return "Format: id={}, name={}, family={}, ms_id={}".format(\
self.id, self.name, self.family, self.ms_id)
class Station(Base):
__tablename__ = "station_data"
net = Column('net', String, primary_key=True, nullable=False)
sta = Column('sta', String, primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
lat = Column('lat', Numeric)
lon = Column('lon', Numeric)
elev = Column('elev', Numeric)
staname = Column('staname', String)
net_id = Column('net_id', ForeignKey('d_abbreviation.id'),
info="key to network description in d_abbreviation")
word_32 = Column('word_32', Numeric, nullable=False, default=3210)
word_16 = Column('word_16', Numeric, nullable=False, default=10)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "Station: net={}, sta={}, ondate={}, staname={}, lat={}, lon={}, elev={}".\
format(self.net,self.sta,self.ondate.isoformat(),self.staname,self.lat,self.lon,self.elev)
class Channel(Base):
__tablename__ = "channel_data"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
inid = Column('inid', ForeignKey('d_abbreviation.id'),
info="key to instrument description in d_abbreviation")
remark = Column('remark', String(30))
unit_signal = Column('unit_signal', ForeignKey('d_unit.id'),
info="key to ground motion signal unit description in d_unit")
unit_calib = Column('unit_calib', ForeignKey('d_unit.id'),
info="key to calibration signal unit description in d_unit")
lat = Column('lat', Numeric)
lon = Column('lon', Numeric)
elev = Column('elev', Numeric)
edepth = Column('edepth', Numeric)
azimuth = Column('azimuth', Numeric)
dip = Column('dip', Numeric)
format_id = Column('format_id', ForeignKey('d_format.id'),
info="key to data format description in d_format", nullable=False)
record_length = Column('record_length', Integer)
samprate = Column('samprate', Numeric, nullable=False)
clock_drift = Column('clock_drift', Numeric)
flags = Column('flags', String(27), info="channel flags", default="CG")
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "Channel: net={}, sta={}, seedchan={}, location={}, ondate={}, offdate={}".\
format(self.net, self.sta, self.seedchan, self.location, self.ondate, self.offdate)
class SimpleResponse(Base):
__tablename__ = "simple_response"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
natural_frequency = Column('natural_frequency', Numeric)
damping_constant = Column('damping_constant', Numeric)
gain = Column('gain', Numeric)
gain_units = Column('gain_units', String)
low_freq_corner = Column('low_freq_corner', Numeric)
high_freq_corner = Column('high_freq_corner', Numeric)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
dlogsens = Column('dlogsens', Numeric)
def __repr__(self):
return "SimpleResponse: net={}, sta={}, seedchan={}, location={}, ondate={}, \
offdate={}, gain={} ({}), low_freq_cutoff={}, high_freq_cutoff={}, \
natural_frequency={}, damping_constant={}".\
format(self.net, self.sta, self.seedchan, self.location, self.ondate, \
self.offdate, self.gain, self.gain_units, self.low_freq_corner, self.high_freq_corner, \
self.natural_frequency, self.damping_constant)
class AmpParms(Base):
__tablename__ = "channelmap_ampparms"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
clip = Column('clip', Numeric)
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "AmpParms: net={}, sta={}, seedchan={}, location={}, ondate={}, \
offdate={}, clip={}".\
format(self.net, self.sta, self.seedchan, self.location, self.ondate, \
self.offdate, self.clip)
class CodaParms(Base):
__tablename__ = "channelmap_codaparms"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
cutoff = Column('cutoff', Numeric)
gain_corr = Column('gain_corr', Numeric)
summary_wt = Column('summary_wt', Numeric)
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "CodaParms: net={}, sta={}, seedchan={}, location={}, ondate={}, \
offdate={}, cutoff={}, gain_corr={}, summary_wt={}".\
format(self.net, self.sta, self.seedchan, self.location, self.ondate, \
self.offdate, self.cutoff, self.gain_corr, self.summary_wt)
class Sensitivity(Base):
__tablename__ = "sensitivity"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
stage_seq = Column('stage_seq', Integer)
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
sensitivity = Column('sensitivity', Numeric)
frequency = Column('frequency', Numeric)
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "Sensitivity: net={}, sta={}, seedchan={}, location={}, ondate={}, \
offdate={}, stage_seq={}, sensitivity={}, frequency={}".\
format(self.net, self.sta, self.seedchan, self.location, self.ondate, \
self.offdate, self.stage_seq, self.sensitivity, self.frequency)
"""
archdb1=> \d poles_zeros
Table "trinetdb.poles_zeros"
Column | Type | Collation | Nullable | Default
------------+-----------------------------+-----------+----------+------------------------------------------
net | character varying(8) | | not null |
sta | character varying(6) | | not null |
seedchan | character varying(3) | | not null |
location | character varying(2) | | not null |
ondate | timestamp without time zone | | not null |
stage_seq | integer | | not null |
channel | character varying(8) | | |
channelsrc | character varying(8) | | |
offdate | timestamp without time zone | | |
pz_key | integer | | not null |
tf_type | character varying(1) | | |
unit_in | integer | | not null |
unit_out | integer | | not null |
ao | double precision | | not null |
af | double precision | | |
lddate | timestamp without time zone | | | timezone('UTC'::text, CURRENT_TIMESTAMP)
Indexes:
"p_z00" PRIMARY KEY, btree (net, sta, seedchan, location, ondate, stage_seq)
"""
class Poles_Zeros(Base):
__tablename__ = "poles_zeros"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
#stage_seq = Column('stage_seq', Integer)
stage_seq = Column('stage_seq', Integer, primary_key=True, nullable=False)
tf_type = Column('tf_type', String(1))
pz_key = Column('pz_key', ForeignKey('pz.key'),
info="key to PZ to get to list of PZ_Data rows", nullable=False)
unit_in = Column('unit_in', Integer, nullable=False)
unit_out = Column('unit_out', Integer, nullable=False)
ao = Column('ao', Numeric, nullable=False)
af = Column('af', Numeric)
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "Poles_Zeros: net={}, sta={}, seedchan={}, location={}, ondate={}, \
offdate={}, stage_seq={}, ao={}, af={}, unit_in={}, unit_out={}".\
format(self.net, self.sta, self.seedchan, self.location, self.ondate, \
self.offdate, self.stage_seq, self.ao, self.af, self.unit_in, self.unit_out)
"""
archdb1=> \d pz
Table "trinetdb.pz"
Column | Type | Collation | Nullable | Default
--------+-----------------------------+-----------+----------+------------------------------------------
key | integer | | not null |
name | character varying(80) | | |
lddate | timestamp without time zone | | | timezone('UTC'::text, CURRENT_TIMESTAMP)
Indexes:
"pz00" PRIMARY KEY, btree (key)
"""
class PZ(Base):
__tablename__ = "pz"
key = Column('key', Integer, Sequence('pzseq'), primary_key=True, nullable=False)
name = Column('name', String(80))
lddate = Column('lddate', DateTime, server_default=text('NOW()'))
def __repr__(self):
return "class PZ: key={}, name=[{}]".format(self.key, self.name)
"""
archdb1=> \d pz_data
Table "trinetdb.pz_data"
Column | Type | Collation | Nullable | Default
---------+----------------------+-----------+----------+---------
key | integer | | not null |
row_key | integer | | not null |
type | character varying(1) | | |
r_value | double precision | | not null |
r_error | double precision | | |
i_value | double precision | | not null |
i_error | double precision | | |
Indexes:
"pzd00" PRIMARY KEY, btree (key, row_key)
"""
class PZ_Data(Base):
__tablename__ = "pz_data"
key = Column('key', Integer, primary_key=True, nullable=False)
row_key = Column('row_key', Integer, primary_key=True, nullable=False)
pztype = Column('type', String(1))
r_value = Column('r_value', Numeric, nullable=False)
r_error = Column('r_error', Numeric)
i_value = Column('i_value', Numeric, nullable=False)
i_error = Column('i_error', Numeric)
def __repr__(self):
return "PZ_Data: key={}, row_key={}, type={}, r_value={}, i_value={}".\
format(self.key, self.row_key, self.type, self.r_value, self.i_value)
class StaCorrection(Base):
__tablename__ = "stacorrections"
net = Column('net', String(8), primary_key=True, nullable=False)
sta = Column('sta', String(6), primary_key=True, nullable=False)
seedchan = Column('seedchan', String(3), primary_key=True, nullable=False)
location = Column('location', String(2), primary_key=True, nullable=False)
ondate = Column('ondate', DateTime, primary_key=True, nullable=False)
offdate = Column('offdate', DateTime, default=datetime.datetime(3000,1,1))
channel = Column('channel', String(8))
channelsrc = Column('channelsrc', String(8), default="SEED")
auth = Column('auth', String(15),
<reponame>sharksmhi/ctd_processing<filename>ctd_processing/former_cnv.py
import os
from pathlib import Path
from time import gmtime, strftime
from ctd_processing import exceptions
from ctd_processing import utils
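# Represents one data column (parameter) of a Seabird CNV file: holds its metadata,
# the collected values and the numeric format used when the values are written back out.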
class CNVparameter:
def __init__(self, use_cnv_info_format=False, cnv_info_object=None, **data):
self.info = {}
for key, value in data.items():
if key in ['index']:
value = int(value)
self.info[key] = value
setattr(self, key, value)
self.use_cnv_info_format = use_cnv_info_format
self.cnv_info_object = cnv_info_object
self._tot_value_length = 11
self._value_format = 'd'
self._nr_decimals = None
self.sample_value = None
self._data = []
self.active = False
def __repr__(self):
return_list = [f'CNVparameter (dict): {self.info["name"]}']
blanks = ' '*4
for key, value in self.info.items():
return_list.append(f'{blanks}{key:<20}{value}')
if len(self._data):
return_list.append(f'{blanks}{"Sample value":<20}{self.sample_value}')
if self.use_cnv_info_format:
form = f'{self.format} (from info file)'
else:
form = f'{self.format} (calculated from data)'
return_list.append(f'{blanks}{"Value format":<20}{form}')
return '\n'.join(return_list)
def _set_nr_decimals(self, value_str):
# Keeps the highest number of decimals in self._nr_decimals
# Also saves sample_value
if self._nr_decimals is None:
self._nr_decimals = len(value_str.strip().split('e')[0].split('.')[-1])
self.sample_value = float(value_str)
else:
nr = len(value_str.strip().split('e')[0].split('.')[-1])
if nr > self._nr_decimals:
self._nr_decimals = nr
self.sample_value = float(value_str)
@property
def format(self):
if self.use_cnv_info_format:
return self.cnv_info_object.format
if self._nr_decimals is None:
form = f'{self._tot_value_length}{self._value_format}'
else:
form = f'{self._tot_value_length}.{self._nr_decimals}{self._value_format}'
return form
def set_value_length(self, length):
self._tot_value_length = length
def add_data(self, value_str):
string = value_str.strip('+-')
if '+' in string or '-' in string:
self._value_format = 'e'
elif '.' in value_str:
self._value_format = 'f'
if '.' in value_str:
self._set_nr_decimals(value_str)
value = float(value_str)
else:
value = int(value_str)
self._value_format = 'd'
self._data.append(value)
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
def change_name(self, new_name):
self.info['name'] = new_name
self.name = new_name
def get_value_as_string_for_index(self, index):
return '{:{}}'.format(self.data[index], self.format)
def set_active(self, is_active):
self.active = is_active
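# Holds the header rows of a CNV file and provides helpers to insert, append to, match and replace rows.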
class CNVheader:
def __init__(self, linebreak='\n'):
self.linebreak = linebreak
self.rows = []
def add_row(self, row):
self.rows.append(row.strip())
def insert_row_after(self, row, after_str, ignore_if_string=None):
for line in self.rows:
if row == line:
return
for i, value in enumerate(self.rows[:]):
if after_str in value:
if ignore_if_string:
if ignore_if_string in self.rows[i+1]:
continue
self.rows.insert(i+1, row.strip())
break
def append_to_row(self, string_in_row, append_string):
for i, value in enumerate(self.rows[:]):
if string_in_row in value:
new_string = self.rows[i] + append_string.rstrip()
if self.rows[i] == new_string:
continue
self.rows[i] = new_string
break
def get_row_index_for_matching_string(self, match_string, as_list=False):
index = []
for i, value in enumerate(self.rows):
if match_string in value:
index.append(i)
if not index:
return None
if as_list:
return index
if len(index) == 1:
return index[0]
return index
def replace_string_at_index(self, index, from_string, to_string, ignore_if_present=True):
if index is None:
return
if type(index) == int:
index = [index]
for i in index:
if to_string in self.rows[i] and ignore_if_present:
continue
self.rows[i] = self.rows[i].replace(from_string, to_string)
def replace_row(self, index, new_value):
self.rows[index] = new_value.strip()
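# Represents a complete CNV file: loads the header and parameter columns, applies the
# modifications (header info, irradiance, fluorescence, true depth) and writes the result back to disk.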
class CNVfile:
def __init__(self, file_path, ctd_processing_object=None, **kwargs):
self.file_path = Path(file_path)
self.ctd_processing_object = ctd_processing_object
self.cnv_info_object = self.ctd_processing_object.cnv_info_object
self.use_cnv_info_format = self.ctd_processing_object.use_cnv_info_format
self._load_ctd_processing_object_info()
self.parameters = {}
self.header = CNVheader()
self.data = {}
self.nr_data_lines = None
self.linebreak = kwargs.get('linebreak', '\n')
self.missing_value = -9.990e-29
self.missing_value_str = '-9.990e-29'
self.g = 9.818 # gravitational acceleration at 60 degrees north (dblg)
self._load_info()
self._save_columns()
self._set_active_parameters()
def modify(self):
self._check_index()
self._modify_header_information()
self._modify_irradiance()
self._modify_fluorescence()
self._modify_depth()
def save_file(self, file_path, overwrite=False):
file_path = Path(file_path)
if file_path.exists() and not overwrite:
raise exceptions.FileExists(file_path)
if not file_path.parent.exists():
os.makedirs(file_path.parent)
all_rows = []
all_rows.extend(self.header.rows)
all_rows.extend(self._get_data_rows())
all_rows.append('')
with open(file_path, 'w') as fid:
fid.write(self.linebreak.join(all_rows))
def _get_data_rows(self):
data_rows = []
for r in range(self.nr_data_lines):
line_list = []
for par, obj in self.parameters.items():
value = obj.get_value_as_string_for_index(r)
line_list.append(value)
line_string = ''.join(line_list)
data_rows.append(line_string)
return data_rows
def _load_ctd_processing_object_info(self):
if self.ctd_processing_object:
self.cnv_info_object = self.ctd_processing_object.cnv_info_object
self.year = self.ctd_processing_object.year
self.ctry = self.ctd_processing_object.ctry
self.ship = self.ctd_processing_object.ship
self.serie = self.ctd_processing_object.serial_number
def _load_info(self):
header = True
has_set_value_length = False
self.nr_data_lines = 0
with open(self.file_path) as fid:
for r, line in enumerate(fid):
strip_line = line.strip()
if '*END*' in line:
self.header.add_row(line)
header = False
continue
if strip_line.startswith('# name'):
name, par = [item.strip() for item in strip_line.split('=', 1)]
index = name.split(' ')[-1]
obj = CNVparameter(use_cnv_info_format=self.use_cnv_info_format,
cnv_info_object=self.cnv_info_object[int(index)],
index=index, name=par)
self.parameters[obj.index] = obj
if header:
self.header.add_row(line)
else:
if not line.strip():
continue
self.nr_data_lines += 1
split_line = strip_line.split()
if not has_set_value_length:
tot_len = len(line.rstrip())
value_length = tot_len / len(split_line)
int_value_length = int(value_length)
if int_value_length != value_length:
raise ValueError('Something is wrong in the file!')
for i, value in enumerate(split_line):
self.parameters[i].set_value_length(int_value_length)
has_set_value_length = True
for i, value in enumerate(split_line):
self.parameters[i].add_data(value)
def _save_columns(self):
self.col_pres = None
self.col_dens = None
self.col_dens2 = None
self.col_depth = None
self.col_sv = None
for par in self.parameters.values():
if 'prDM: Pressure, Digiquartz [db]' in par.name:
self.col_pres = par.index
elif 'sigma-t00: Density [sigma-t' in par.name:
self.col_dens = par.index
elif 'sigma-t11: Density, 2 [sigma-t' in par.name:
self.col_dens2 = par.index
elif 'depFM: Depth [fresh water, m]' in par.name:
self.col_depth = par.index
elif 'depFM: Depth [true depth, m]' in par.name:
self.col_depth = par.index
elif 'svCM: Sound Velocity [Chen-Millero, m/s]' in par.name:
self.col_sv = par.index
def _set_active_parameters(self):
for i, info in self.cnv_info_object.items():
self.parameters[i].set_active(info.active)
def _change_parameter_name(self, current_name, new_name):
for par in self.parameters.values():
if par.name == new_name:
return
for par in self.parameters.values():
if current_name == par.name:
par.change_name(new_name)
def _get_parameter_name_matching_string(self, match_string):
for par in self.parameters.values():
if match_string in par.name:
return par.name
def _check_index(self):
if not self.cnv_info_object:
raise exceptions.MissingAttribute('cnv_info_object')
for info, cnv in zip(self.cnv_info_object.values(), self.parameters.values()):
if 'depFM: Depth [true depth, m], lat' in info.name:
continue
if info.name not in cnv.name:
print(info.name)
print(cnv.name)
raise exceptions.InvalidParameterIndex(f'Index does not match in cnv for parameter: {info.name}')
cnv.active = True
# Here it should be possible to define sensor_index, i.e. the first column in self.cnv_column_info
# it follows automatically from how DatCnv.psa is configured
# Start by checking that it is at least correct
def _get_pressure_data(self):
return self.parameters[self.col_pres].data
def _get_depth_data(self):
return self.parameters[self.col_depth].data
def _get_sound_velocity_data(self):
return self.parameters[self.col_sv].data
def _get_density_data(self):
if self.parameters[self.col_dens].active:
return self.parameters[self.col_dens].data
elif self.parameters[self.col_dens2].active:
return self.parameters[self.col_dens2].data
else:
return [self.missing_value]*self.nr_data_lines
def _get_calculated_true_depth(self):
prdM_data = self._get_pressure_data()
sigT_data = self._get_density_data()
# Calculation of true depth # Replace depFM with true depth in the header
# Start params
dens_0 = (sigT_data[0] + 1000.) / 1000. # start density
p_0 = 0
depth = 0
true_depth = []
for q in range(len(prdM_data)):
if sigT_data[q] != self.missing_value:
# decibar to bar (dblRPres)
rpres = prdM_data[q] * 10.
# Calculate density (dblDens)
dens = (sigT_data[q] + 1000.) / 1000.
# Calculate delta depth (dblDDjup)
ddepth = (rpres - p_0) / ((dens + dens_0) / 2. * self.g)
# Sum all depths and use the calculated pressure in the next loop iteration
# If it is the first (perhaps not entirely relevant) or last value, divide by two according to the trapezoidal rule
dens_0 = dens
# if q == 0 or q == (len(prdM)-1):
# Depth = Depth + DDepth / 2.
# else:
# Depth = Depth + DDepth
# Changed by Örjan 2015-02-10: the division by 2 for the first and last depth was removed.
depth = depth + ddepth
# Save the calculated pressure for the next loop iteration
p_0 = rpres
# Store the TrueDepth value
true_depth.append(depth)
else:
true_depth.append(self.missing_value)
return true_depth
def _get_mean_sound_velocity(self):
svCM_data = self._get_sound_velocity_data()
return sum(svCM_data) / len(svCM_data)
def _modify_header_information(self):
svMean = self._get_mean_sound_velocity()
now = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
after_str = '** Ship'
rows_to_insert = [f'** Average sound velocity: {str("%6.2f" % svMean)} m/s',
f'** True-depth calculation {now}',
# f'** CTD Python Module SMHI /ver 3-12/ feb 2012',
f'** Python Module: ctd_processing, nov 2020',
f'** LIMS Job: {self.year}{self.ctry}{self.ship}-{self.serie}'
]
for row in rows_to_insert:
if 'True-depth calculation' in row:
self.header.insert_row_after(row, after_str, ignore_if_string='True-depth calculation')
else:
self.header.insert_row_after(row, after_str)
after_str = row
def _modify_irradiance(self):
self.header.append_to_row('par: PAR/Irradiance', ' [µE/(cm^2*s)]')
def _modify_fluorescence(self):
# Adds Chl-a to the fluorometers whose designation starts with FLNTURT
par_name_1 = self._get_parameter_name_matching_string('Fluorescence, WET Labs ECO-AFL/FL [mg/m^3]')
fluo_index_1 = self.header.get_row_index_for_matching_string('Fluorescence, WET Labs ECO-AFL/FL [mg/m^3]')
fluo_xml_index_1 = self.header.get_row_index_for_matching_string('Fluorometer, WET Labs ECO-AFL/FL -->')
serial_index_1 = self.header.get_row_index_for_matching_string('<SerialNumber>FLNTURT', as_list=True)
par_name_2 = self._get_parameter_name_matching_string('Fluorescence, WET Labs ECO-AFL/FL, 2 [mg/m^3]')
fluo_index_2 = self.header.get_row_index_for_matching_string('Fluorescence, WET Labs ECO-AFL/FL, 2 [mg/m^3]')
fluo_xml_index_2 = self.header.get_row_index_for_matching_string('Fluorometer, WET Labs ECO-AFL/FL, 2 -->')
serial_index_2 = self.header.get_row_index_for_matching_string('<SerialNumber>FLPCRTD', as_list=True)
if fluo_xml_index_1 and (fluo_xml_index_1 + 2) in serial_index_1:
self.header.replace_string_at_index(fluo_xml_index_1, 'Fluorometer', 'Chl-a Fluorometer')
self.header.replace_string_at_index(fluo_index_1, 'Fluorescence', 'Chl-a Fluorescence')
new_par_name_1 = par_name_1.replace('Fluorescence', 'Chl-a Fluorescence')
self._change_parameter_name(par_name_1, new_par_name_1)
if fluo_xml_index_2 and (fluo_xml_index_2 + 2) in serial_index_2:
self.header.replace_string_at_index(fluo_xml_index_2, 'Fluorometer', 'Phycocyanin Fluorometer')
self.header.replace_string_at_index(fluo_index_2, 'Fluorescence', 'Phycocyanin Fluorescence')
new_par_name_2 = par_name_2.replace('Fluorescence', 'Phycocyanin Fluorescence')
self._change_parameter_name(par_name_2, new_par_name_2)
def _modify_depth(self):
index = self.header.get_row_index_for_matching_string('depFM: Depth [fresh water, m]')
self.header.replace_string_at_index(index, 'fresh water', 'true depth')
par_name = self._get_parameter_name_matching_string('depFM: Depth [fresh water, m]')
if par_name:
new_par_name = par_name.replace('fresh water', 'true depth')
self._change_parameter_name(par_name, new_par_name)
<gh_stars>1-10
from sys import stdout
from evaluate_expression import *
from operations_and_expressions import *
from write import *
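# Code generator: walks the parse tree and emits Intel-syntax x86-64 assembly,
# using r8-r15 as a general-purpose register pool and the C library printf/scanf for I/O.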
regs = ["r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
relational_ops = [ ">", ">=", "<", "<=", "!=", "=="]
allocationTable = {}
globalTable = {}
stack = []
BYTE_SIZE = 8
IF_NUMBER = 0
WHILE_NUMBER = 0
program = []
functions = []
PART = 0
STRING_COUNT = 0
# Starts generating the assembly code.
def generate(tree, filePath):
global stack
global program
global regs
global printf
global scanf
global PART
tree = cleanList(tree)
stopPoint = 0
while (tree[stopPoint][0] == "DECL"):
globalDecl(tree[stopPoint])
stopPoint += 1
if (tree[0] == "MAINF"):
tree = [tree]
for i in range(stopPoint, len(tree)):
branch = tree[i]
key = branch[0]
if (key == "MAINF"):
PART = 0
genMain(branch)
elif (key == "VOIDF"):
PART = 1
genVoid(branch)
elif (key == "TYPEF"):
PART = 1
genType(branch)
writeToFile(filePath)
# Generates the main function.
def genMain(tree):
global PART
PART = 0
block = tree[4][1]
stack.append("__main_")
write("main:\n")
genBlock(block)
PART = 0
write("\tret\n")
stack.pop()
# Generates the code for a void function or an inner void/main (hatta) function
def genVoid(tree):
global PART
PART = 1
if (tree[0] == "MAINF"):
block = tree[4][1]
args = tree[2]
name = "hatta"
else:
block = tree[5][1]
args = tree[3]
name = tree[1]
stack.append("__" + name + "_")
write("\n_" + name + ":\n")
write("\tpush rbp\n")
write("\tmov rbp, rsp\n")
if (len(args) > 0):
getFunctionArgs(tree)
genBlock(block)
stack.pop()
write("\tpop rbp\n")
write("\tret\n")
PART = 0
# Generates instructions equivalent to a type function
def genType(tree):
global PART
global BYTE_SIZE
global regs
PART = 1
stack.append("__" + tree[1] + "_")
write("_"+tree[1]+":\n")
args = tree[3]
if (len(args) > 0):
getFunctionArgs(tree)
block = tree[7][1]
genBlock(block)
write("\tpop rbp\n")
write("\tret\n")
stack.pop()
PART = 0
# Gets the arguments of a function and places them in registers
def getFunctionArgs(tree):
args = tree[3]
if (not type(args[0]) is list):
args = [(args)]
write("\tpush rbp\n")
write("\tmov rbp, rsp\n")
rbp_count = len(args)*BYTE_SIZE + BYTE_SIZE
prefix = "__" + tree[1] + "_"
for arg in args:
aux_reg = regs.pop()
var = prefix + arg[1]
allocationTable.update({var:("[rbp+"+
str(rbp_count)+"]", arg[2])})
rbp_count -= BYTE_SIZE
# Assigns a value to a local variable.
def genAssign(tree):
global stack
global allocationTable
global regs
global STRING_COUNT
# If it's a global variable, we need an extra mov
if (tree[1] in globalTable and
getScopePlusVar(tree[1]) == tree[1]):
reg = regs.pop()
writeMov(reg,
genExpr(tree[2], getRegisterInScope(tree[1])))
writeMov(getRegisterInScope(tree[1]), reg)
else:
# If the value is a string, declare it in the data section
if (isString(cleanToFirstValue(tree[2]))):
comp = getRegisterInScope(tree[1])
if (not comp in allocationTable or comp != comp.__getitem__(0)):
value = cleanToFirstValue(tree[2])[1:-1]
writeToDataSection("\t"+tree[1]+" dd `"+
str(value)+"`,0\n")
writeMov(getRegisterInScope(tree[1]), tree[1])
return
value = cleanToFirstValue(tree[2])[1:-1]
writeToDataSection("\t"+tree[1]+" dd `"+
str(value)+"`,0\n")
if (getRegisterInScope(tree[1]) == ""):
reg = tree[1]
writeMov(regs.pop(), tree[1])
else:
reg = getRegisterInScope(tree[1])
writeMov(regs.pop(), getRegisterInScope(tree[1]))
# If it's a char or int, just put it in a register directly
else:
reg = getRegisterInScope(tree[1])
value = genExpr(tree[2], reg)
if (reg != value):
writeMov(reg, value)
# Allocates a register to the new variable by inserting it in the
# table with the given name in scope.
# If the declaration also includes assignment, evaluate the
# expression and assign it.
def genDecl(tree):
global stack
global allocationTable
global regs
varName = getAllStack() + tree[1]
regName = regs.pop()
typeVar = tree[2]
allocationTable.update({varName:(regName, typeVar)})
if (len(tree) == 5):
genAssign(["ASSIGN", tree[1], tree[4]])
return regName
# Global variables
def globalDecl(tree):
global allocationTable
global globalTable
if (tree[2] == "STRING"):
allocationTable.update({tree[1]:(tree[1], tree[2])})
if (len(tree) == 3):
if (not tree[1] in allocationTable and
tree[1] != allocationTable[tree[1]].__getitem__(0)):
genAssign(["ASSIGN", tree[1], "0"])
else:
genAssign(["ASSIGN", tree[1], tree[4]])
return
writeToDataSection("\t" + tree[1] + " dd")
writeToDataSection(" 0\n")
allocationTable.update({tree[1]:("["+tree[1]+"]", tree[2])})
globalTable.update({tree[1]:(0, tree[2])})
if (len(tree) > 3):
genAssign(["ASSIGN", tree[1], tree[4]])
# Generates a block of code into assembly code.
def genBlock(tree):
global regs
global stack
global allocationTable
if (not type(tree[0]) is list):
tree = [tree]
for t in tree:
if(t[0] == "OPEN"):
stack.append("_scope_")
genBlock(t[1])
stack.pop()
if(t[0] == "DECL"):
genDecl(t)
elif(t[0] == "ASSIGN"):
genAssign(t)
elif(t[0] == "IF"):
genIf(t)
elif(t[0] == "WHILE"):
genWhile(t)
elif(t[0] == "PRINT"):
genPrint(t)
elif(t[0] == "RET"):
genRet(t)
elif(t[0] == "DEC" or t[0] == "INC"):
genIncDec(t)
elif(t[0] == "READ"):
genRead(t)
elif(t[0] == "FULLSTOP"):
continue
elif(t[0] == "MAINF"):
stack.append("__hatta_")
genVoid(t)
stack.pop()
elif(t[0] == "TYPEF"):
stack.append("__" + t[1] + "_")
genType(t)
stack.pop()
elif(t[0] == "VOIDF"):
stack.append("__" + t[1] + "_")
genVoid(t)
stack.pop()
elif(t[1] == "LPAREN" and type(t[2]) is list and
t[3] == "RPAREN" and (not (t[0] == "IF"))
and (not (t[0] == "WHILE"))):
genFunctionCall(t)
# Print function in assembly.
def genPrint(tree):
global allocationTable
global regs
global stack
global STRING_COUNT
value = tree[1]
varStart = ""
printType = ""
# If we want to print a function call
if (type(tree[1]) is list and len(tree[1]) == 4 and
tree[1][1] == "LPAREN" and tree[1][3] == "RPAREN"):
genFunctionCall(tree[1])
reg = regs.pop()
writeMov(reg, "rax")
varStart = reg
printType = "writeInt"
regs.append(reg)
# If we want to print a value directly
elif (getRegisterInScope(cleanToFirstValue(value)) == ""
and isValue(value)):
# Special case for printing a string
if (isString(cleanToFirstValue(value))):
value = cleanToFirstValue(tree[1])[1:-1]
writeToDataSection("\ts"+str(STRING_COUNT)+" dd `"+
str(value)+"`, 0\n")
allocationTable.update({"s"+str(STRING_COUNT):(value, "STRING")})
varStart = "s"+str(STRING_COUNT)
printType = getPrintType("STRING")
STRING_COUNT += 1
else:
varStart = cleanToFirstValue(value)
printType = getPrintType(value)
# If we want to print a variable content
elif (len(value) == 1 and
getRegisterInScope(cleanToFirstValue(value)) != ""):
value = getScopePlusVar(value)
printType = allocationTable[value].__getitem__(1)
printType = getPrintType(printType)
varStart = getRegisterInScope(value)
elif (len(value) > 1):
# Multiple cases, should use genExpr
varStart = genExpr(value, varStart)
printType = "writeInt"
writeMov("rax", "0")
writeMov("rsi", varStart)
writeMov("rdi", printType)
write("\tcall printf\n")
def genRet(tree):
global regs
# If the function returns a negative variable
if (type(tree[1]) is list and tree[1][0] == "-"):
tree[1] = genExpr(tree[1], "rax")
negate(tree[1], "rax")
# If the function returns a bitwise-not (~) value
elif (type(tree[1]) is list and tree[1][0] == "~"):
reg = regs.pop()
writeMov(reg, getRegisterInScope(tree[1][1]))
write("\tnot " + reg + "\n")
writeMov("rax", reg)
regs.append(reg)
else:
tree = cleanList(tree)
# If the function returns a value
if (len(tree) > 1):
to_return = getScopePlusVar(tree[1])
if (to_return != ""):
to_return = allocationTable[to_return]
to_return = to_return.__getitem__(0)
if (to_return[0] == "["):
aux_reg = regs.pop()
writeMov(aux_reg,to_return)
to_return = aux_reg
regs.append(aux_reg)
else:
to_return = tree[1]
writeMov("rax", to_return)
# Reads input using gcc function scanf
def genRead(tree):
global STRING_COUNT
var = getScopePlusVar(tree[1])
scanType = allocationTable[var].__getitem__(1)
reg = allocationTable[var].__getitem__(0)
write("\txor rax, rax\n")
writeMov("rdi", getPrintType(scanType))
pointer = "s" + str(STRING_COUNT)
writeToDataSection("\t" + pointer + " db 0\n")
writeMov("rsi", pointer)
write("\tcall scanf\n")
writeMov("rbx", "[" + pointer + "]")
writeMov(reg, "rbx")
STRING_COUNT += 1
# Generates the operations to calculate the value of an expression
def genExpr(tree, startVar):
global regs
if (startVar == ""):
startVar = regs.pop()
if (len(tree) == 1):
if (isValue(tree)):
return cleanToFirstValue(tree)
elif (len(tree) == 2):
reg = getRegisterInScope(cleanToFirstValue(tree[1]))
if (reg == ""):
reg = cleanToFirstValue(tree[1])
if (tree[0] == "~"):
writeMov(startVar, reg)
write("\tnot ", startVar, "\n")
return startVar
elif(tree[0] == "-"):
writeMov(startVar, reg)
write("\tneg ", startVar, "\n")
return startVar
#solve subExpressions of current expression
#and replace the variables with their values
for i in range(len(tree)):
if (len(tree[i]) > 1):
newStartVar = regs.pop()
tree[i] = genExpr(tree[i], newStartVar)
else:
#!!This if below might cause trouble, it should actually
#check that tree[i] is not an operand
if (i%2 == 0):
#If I have a value in my expression, e.g. x + 2
if (isValue(tree[i])):
tree[i] = cleanToFirstValue(tree[i])
else:
tree[i] = getRegisterInScope(tree[i])
#solve this expression
writeMov(startVar, tree[0])
while (len(tree) >= 3):
writeOp(startVar, tree[1], tree[2])
tree = tree[2:]
return startVar
# Inc, Dec
def genIncDec(tree):
global allocationTable
if(tree[0] == "DEC"):
op = "dec"
else:
op = "inc"
write("\t", op, " ",
allocationTable[getScopePlusVar(tree[1])].__getitem__(0), "\n")
# Generates a function call
def genFunctionCall(tree):
global BYTE_SIZE
args = tree[2]
if (len(args) == 1):
args = [args]
if (len(args) == 0):
write("\tcall _" + tree[0] + "\n")
else:
for arg in args:
var = getScopePlusVar(arg)
# If argument is a variable
if (var != ""):
var = allocationTable[var].__getitem__(0)
# If it's a value
else:
arg = cleanToFirstValue(arg)
declTree = ["DECL", cleanToFirstValue(arg)]
if (isString(arg)):
declTree.append('STRING')
elif (isChar(arg)):
declTree.append('CHAR')
else:
declTree.append('INT')
var = genDecl(declTree)
write("\tpush " + var + "\n")
write("\tcall _" + tree[0] + "\n")
write("\tadd rsp, " + str(BYTE_SIZE*len(args)) + | |
<reponame>swansonk14/entry-cli
###############################
import argparse
import os
import sys
import csv
from functools import partial
import openbabel as ob
import pybel
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from tqdm import trange
from multiprocessing.context import TimeoutError
from multiprocessing import Pool
###############################
__doc__ = """Performs calculation of physiochemical properties of potential antibiotics. SMILES strings are parsed,
conformers are generated, and properties calculated. Properties include: chemical formula, molecular weight, rotatable
bonds, globularity, and PBF.
"""
PRIMARY_AMINE_SMARTS = pybel.Smarts('[$([N;H2;X3][CX4]),$([N;H3;X4+][CX4])]')
def main():
args = parse_args(sys.argv[1:])
if args.smiles:
properties = average_properties(args.smiles)
# A file will be written if command line option provide, otherwise write to stdout
if args.output:
mols_to_write = [properties]
write_csv(mols_to_write, args.output)
else:
report_properties(properties)
elif args.batch_file:
with open(args.batch_file) as f:
reader = csv.DictReader(f)
read_fieldnames = list(reader.fieldnames)
data = list(reader)
write_fieldnames = read_fieldnames + ['primary_amine', 'globularity', 'rotatable_bonds']
with Pool() as pool, open(args.output, 'w') as f:
writer = csv.DictWriter(f, fieldnames=write_fieldnames)
writer.writeheader()
iterator = pool.imap_unordered(partial(average_properties_safe, smiles_column=args.smiles_column), data)
for _ in trange(len(data)):
try:
row = iterator.next(timeout=args.timeout)
if row is not None:
writer.writerow(row)
except TimeoutError:
pass
def parse_args(arguments):
"""Parse the command line options.
:return: All script options
"""
parser = argparse.ArgumentParser(description=__doc__)
group = parser.add_mutually_exclusive_group()
group.add_argument("-s", "--smiles", dest="smiles", metavar="SMILES string", default=None)
group.add_argument("-b", "--batch", dest="batch_file", metavar="Batch file", default=None)
group.add_argument("-c", "--column", dest="smiles_column", metavar="Smiles column", default='canonical_smiles')
parser.add_argument("-o", "--output", dest="output", metavar="Output file", default=None,
help="Defaults to csv file with same name as input")
parser.add_argument("-t", "--timeout", dest="timeout", type=int, metavar="Timeout", default=10)
args = parser.parse_args(arguments)
if not args.smiles and not args.batch_file:
parser.error("Input structure is needed")
# If no output file is specified in batch mode, then replace the file extension of the input with .csv
if args.batch_file and not args.output:
args.output = os.path.splitext(args.batch_file)[0] + '.csv'
return args
def report_properties(properties):
"""
Write out the results of physiochemical properties to stdout
:param smiles: SMILES string of input molecule
:param properties: physiochemical properties to report
:type smiles: str
:type properties: dict
:return: None
"""
print("Properties for %s" % properties['smiles'])
print("--------------------------")
print("Mol. Wt.:\t%f" % properties['molwt'])
print("Formula:\t%s" % properties['formula'])
print("RB:\t\t%i" % properties['rb'])
print("Glob:\t\t%f" % properties['glob'])
print("PBF:\t\t%f" % properties['pbf'])
def parse_batch(filename):
"""
Read a file containing names and SMILES strings
Expects a file with no header in which each line contains a SMILES string followed by a name for the molecule.
SMILES and name can be separated by any whitespace.
:param filename: file to read
:type filename: str
:return: List of tuples with names and SMILES
:rtype: list
"""
smiles = []
names = []
with(open(filename, 'r')) as batch:
for line in batch:
(smi, name) = tuple(line.split())
smiles.append(smi)
names.append(name)
return zip(smiles, names)
def write_csv(mols_to_write, filename):
"""
Write out results of physiochemical properties
:param mols_to_write: list of molecule properties to write
:param filename: path to file to write
:type mols_to_write: list
:type filename: str
:return: None
"""
with(open(filename, 'w')) as out:
# fieldnames = ['smiles', 'formula', 'molwt', 'rb', 'glob', 'pbf']
fieldnames = ['smiles', 'formula', 'molwt', 'rb', 'glob', 'primary_amine']
writer = csv.DictWriter(out, fieldnames=fieldnames)
writer.writeheader()
for mol in mols_to_write:
writer.writerow(mol)
def average_properties_safe(row, smiles_column='smiles'):
try:
properties = average_properties(row[smiles_column])
row.update(properties)
return row
except Exception as e:
print(e)
return None
def average_properties(smiles):
"""
Calculate all relevant properties for a given molecule averaged across conformers
:param mol: input molecule smiles
:type mol: openbabel.OBMol
:return: dictionary of properties
:rtype dict
..todo: remove reliance on pybel
"""
mol = smiles_to_ob(smiles)
mols = run_confab(mol)
num_confs = mols.NumConformers()
globs = np.empty(num_confs)
# pbfs = np.empty(num_confs)
for i in range(num_confs):
mols.SetConformer(i)
pymol = pybel.Molecule(mols)
# calculate properties
globs[i] = calc_glob(pymol)
# pbfs[i] = calc_pbf(pymol)
data = {
'rotatable_bonds': rotatable_bonds(pymol),
'globularity': np.mean(globs),
'primary_amine': has_primary_amine(pymol),
# 'pbf': np.mean(pbfs)
}
return data
def smiles_to_ob(mol_string):
"""
Reads a SMILES string and creates a molecule object
Currently, an initial guess at 3D geometry is performed by RDkit.
:param mol_string: SMILES string
:type mol_string: str
:return: molecule object
:rtype: openbabel.OBMol
"""
mol = initial_geom_guess(mol_string)
obmol = ob.OBMol()
obConv = ob.OBConversion()
obConv.SetInAndOutFormats("mol", "mol")
obConv.ReadString(obmol, mol)
return obmol
def initial_geom_guess(smiles):
"""
Parses a SMILES string and performs an initial guess of geometry
:param smiles: SMILES structure string
:return: String with Mol structure text
:rtype: str
..todo: use openbabel for initial guess
"""
m = Chem.MolFromSmiles(smiles)
m2 = Chem.AddHs(m)
# Generate initial guess
AllChem.EmbedMolecule(m2, AllChem.ETKDG())
AllChem.MMFFOptimizeMolecule(m2)
# Write mol file
return Chem.MolToMolBlock(m2)
def run_confab(mol, rmsd_cutoff=0.5, conf_cutoff=100000, energy_cutoff=50.0, confab_verbose=False):
"""
Generate ensemble of conformers to perform calculations on
:param mol: initial molecule to generate conformers from
:param rmsd_cutoff: similarity threshold for conformers, default: 0.5
:param conf_cutoff: max number of conformers to generate, default: 100,000
:param energy_cutoff: max relative energy between conformers, default: 50
:param confab_verbose: whether confab should report on rotors
:type mol: openbabel.OBMol
:type rmsd_cutoff: float
:type conf_cutoff: int
:type energy_cutoff: float
:type confab_verbose: bool
:return: list of conformers for a given molecule
:rtype: openbabel.OBMol
"""
pff = ob.OBForceField_FindType("mmff94")
pff.Setup(mol)
pff.DiverseConfGen(rmsd_cutoff, conf_cutoff, energy_cutoff, confab_verbose)
pff.GetConformers(mol)
return mol
def calc_glob(mol):
"""
Calculates the globularity (glob) of a molecule
glob varies from 0 to 1 with completely flat molecules like benzene having a
glob of 0 and spherical molecules like adamantane having a glob of 1
:param mol: pybel molecule object
:type mol: pybel.Molecule
:return: globularity of molecule
:rtype: float | int
"""
points = get_atom_coords(mol, heavy_only=False)
if points is None:
return 0
points = points.T
# calculate covariance matrix
cov_mat = np.cov([points[0, :], points[1, :], points[2, :]])
# calculate eigenvalues of covariance matrix and sort
vals, vecs = np.linalg.eig(cov_mat)
vals = np.sort(vals)[::-1]
# glob is ratio of last eigenvalue and first eigenvalue
if vals[0] != 0:
return vals[-1] / vals[0]
else:
return -1
def calc_pbf(mol):
"""
Uses SVD to fit atoms in molecule to a plane then calculates the average
distance to that plane.
:param mol: pybel molecule object
:type mol: pybel.Molecule
:return: average distance of all atoms to the best fit plane
:rtype: float
"""
points = get_atom_coords(mol)
c, n = svd_fit(points)
pbf = calc_avg_dist(points, c, n)
return pbf
def has_primary_amine(mol):
"""
Uses SMARTS to determine if the molecule has a primary amine.
:param mol: pybel molecule object
:return: 1 if mol has a primary amine, 0 otherwise
:rtype: int
"""
primary_amines = PRIMARY_AMINE_SMARTS.findall(mol)
return int(len(primary_amines) > 0)
def rotatable_bonds(mol):
"""
Calculates the number of rotatable bonds in a molecules. Rotors are defined
as any non-terminal bond between heavy atoms, excluding amides
:param mol: pybel molecule object
:type mol: pybel.Molecule
:return: number of rotatable bonds
:rtype: int
"""
rb = 0
for bond in ob.OBMolBondIter(mol.OBMol):
if is_rotor(bond):
rb += 1
return rb
def is_rotor(bond, include_amides=False):
"""
Determines if a bond is rotatable
Rules for rotatable bonds:
Must be a single or triple bond
Must include two heavy atoms
Cannot be terminal
Cannot be in a ring
If a single bond to one sp hybridized atom, not rotatable
:param bond:
:return: If a bond is rotatable
:rtype: bool
"""
# Must be single or triple bond
if bond.IsDouble(): return False
# Don't count the N-C bond of amides
if bond.IsAmide() and not include_amides: return False
# Not in a ring
if bond.FindSmallestRing() is not None: return False
# Don't count single bonds adjacent to triple bonds, still want to count the triple bond
if (bond.GetBeginAtom().GetHyb() == 1) != (bond.GetEndAtom().GetHyb() == 1): return False
# Cannot be terminal
if bond.GetBeginAtom().GetHvyValence() > 1 and bond.GetEndAtom().GetHvyValence() > 1: return True
def calc_avg_dist(points, C, N):
"""
Calculates the average distance a given set of points is from a plane
:param points: numpy array of points
:param C: centroid vector of plane
:param N: normal vector of plane
:return: Average distance of each atom from the best-fit plane
"""
sum = 0
for xyz in points:
sum += abs(distance(xyz, C, N))
return sum / len(points)
def get_atom_coords(mol, heavy_only=False):
"""
Retrieve the 3D coordinates of all atoms in a molecules
:param mol: pybel molecule object
:return numpy array of coordinates
"""
num_atoms = len(mol.atoms)
pts = np.empty(shape=(num_atoms, 3))
atoms = mol.atoms
for a in range(num_atoms):
pts[a] = atoms[a].coords
return pts
def svd_fit(X):
"""
Fitting algorithm was obtained from https://gist.github.com/lambdalisue/7201028
Find (n - 1) dimensional standard (e.g. line in 2 dimension, plane in 3
dimension, hyperplane in n dimension) via solving Singular Value
Decomposition.
The idea | |
else:
self.assertEqual(job["estimatedDiskUsage"], 4)
goldenFiles = goldenFilesC
currentRun = 0
currentLumi = 0
currentEvent = 0
for fileObj in jobFiles:
assert fileObj["lfn"] in goldenFiles, \
"Error: Unknown file in merge jobs."
goldenFiles.remove(fileObj["lfn"])
fileRun = list(fileObj["runs"])[0].run
fileLumi = min(list(fileObj["runs"])[0])
fileEvent = fileObj["first_event"]
if currentRun == 0:
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
continue
assert fileRun >= currentRun, \
"ERROR: Files not sorted by run."
if fileRun == currentRun:
assert fileLumi >= currentLumi, \
"ERROR: Files not ordered by lumi"
if fileLumi == currentLumi:
assert fileEvent >= currentEvent, \
"ERROR: Files not ordered by first event"
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
assert len(goldenFilesA) == 0 and len(goldenFilesB) == 0 and \
len(goldenFilesC) == 0, \
"ERROR: Files missing from merge jobs."
return
def testMaxMergeSize2(self):
"""
_testMaxMergeSize2_
Set the minimum merge size to be one byte larger than the largest job
group in the WMBS instance and the max merge size to be one byte larger
than the total size of two of the groups. Verify that one merge job
is produced with two of the job groups in it.
"""
self.stuffWMBS()
splitter = SplitterFactory()
jobFactory = splitter(package="WMCore.WMBS",
subscription=self.mergeSubscription)
result = jobFactory(min_merge_size=4097, max_merge_size=7169,
max_merge_events=20000)
assert len(result) == 1, \
"ERROR: More than one JobGroup returned."
assert len(result[0].jobs) == 1, \
"ERROR: One job should have been returned."
goldenFilesA = ["file1", "file2", "file3", "file4"]
goldenFilesB = ["fileA", "fileB", "fileC"]
goldenFilesC = ["fileI", "fileII", "fileIII", "fileIV"]
self.assertEqual(result[0].jobs[0]["estimatedDiskUsage"], 7)
self.assertEqual(result[0].jobs[0]["possiblePSN"], {"T1_US_FNAL", "T2_CH_CERN"})
jobFiles = list(result[0].jobs)[0].getFiles()
currentRun = 0
currentLumi = 0
currentEvent = 0
for fileObj in jobFiles:
if fileObj["lfn"] in goldenFilesA:
goldenFilesA.remove(fileObj["lfn"])
elif fileObj["lfn"] in goldenFilesB:
goldenFilesB.remove(fileObj["lfn"])
elif fileObj["lfn"] in goldenFilesC:
goldenFilesC.remove(fileObj["lfn"])
fileRun = list(fileObj["runs"])[0].run
fileLumi = min(list(fileObj["runs"])[0])
fileEvent = fileObj["first_event"]
if currentRun == 0:
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
continue
assert fileRun >= currentRun, \
"ERROR: Files not sorted by run."
if fileRun == currentRun:
assert fileLumi >= currentLumi, \
"ERROR: Files not ordered by lumi"
if fileLumi == currentLumi:
assert fileEvent >= currentEvent, \
"ERROR: Files not ordered by first event"
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
assert len(goldenFilesB) == 0 and \
(len(goldenFilesA) == 0 or len(goldenFilesC) == 0), \
"ERROR: Files not allocated to jobs correctly."
return
def testMaxEvents1(self):
"""
_testMaxEvents1_
Set the maximum number of events per merge job to 1.
"""
self.stuffWMBS()
splitter = SplitterFactory()
jobFactory = splitter(package="WMCore.WMBS",
subscription=self.mergeSubscription)
result = jobFactory(min_merge_size=1, max_merge_size=20000,
max_merge_events=1)
assert len(result) == 1, \
"ERROR: More than one JobGroup returned: %s" % result
assert len(result[0].jobs) == 3, \
"ERROR: Three jobs should have been returned: %s" % len(result[0].jobs)
goldenFilesA = ["file1", "file2", "file3", "file4"]
goldenFilesB = ["fileA", "fileB", "fileC"]
goldenFilesC = ["fileI", "fileII", "fileIII", "fileIV"]
for job in result[0].jobs:
self.assertEqual(job["possiblePSN"], {"T1_US_FNAL", "T2_CH_CERN"})
jobFiles = job.getFiles()
if jobFiles[0]["lfn"] in goldenFilesA:
self.assertEqual(job["estimatedDiskUsage"], 4)
goldenFiles = goldenFilesA
elif jobFiles[0]["lfn"] in goldenFilesB:
self.assertEqual(job["estimatedDiskUsage"], 3)
goldenFiles = goldenFilesB
else:
self.assertEqual(job["estimatedDiskUsage"], 4)
goldenFiles = goldenFilesC
currentRun = 0
currentLumi = 0
currentEvent = 0
for fileObj in jobFiles:
assert fileObj["lfn"] in goldenFiles, \
"Error: Unknown file in merge jobs."
goldenFiles.remove(fileObj["lfn"])
fileRun = list(fileObj["runs"])[0].run
fileLumi = min(list(fileObj["runs"])[0])
fileEvent = fileObj["first_event"]
if currentRun == 0:
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
continue
assert fileRun >= currentRun, \
"ERROR: Files not sorted by run: %s, %s" % (fileRun, currentRun)
if fileRun == currentRun:
assert fileLumi >= currentLumi, \
"ERROR: Files not ordered by lumi"
if fileLumi == currentLumi:
assert fileEvent >= currentEvent, \
"ERROR: Files not ordered by first event"
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
assert len(goldenFilesA) == 0 and len(goldenFilesB) == 0 and \
len(goldenFilesC) == 0, \
"ERROR: Files missing from merge jobs."
return
def testMaxEvents2(self):
"""
_testMaxEvents2_
Set the minimum merge size to be one byte larger than the largest job
group in the WMBS instance and the max events to be one event larger
than the total events in two of the groups. Verify that one merge job
is produced with two of the job groups in it.
"""
self.stuffWMBS()
splitter = SplitterFactory()
jobFactory = splitter(package="WMCore.WMBS",
subscription=self.mergeSubscription)
result = jobFactory(min_merge_size=4097, max_merge_size=20000,
max_merge_events=7169)
assert len(result) == 1, \
"ERROR: More than one JobGroup returned."
assert len(result[0].jobs) == 1, \
"ERROR: One job should have been returned."
self.assertEqual(result[0].jobs[0]["estimatedDiskUsage"], 7)
self.assertEqual(result[0].jobs[0]["possiblePSN"], {"T1_US_FNAL", "T2_CH_CERN"})
goldenFilesA = ["file1", "file2", "file3", "file4"]
goldenFilesB = ["fileA", "fileB", "fileC"]
goldenFilesC = ["fileI", "fileII", "fileIII", "fileIV"]
jobFiles = list(result[0].jobs)[0].getFiles()
currentRun = 0
currentLumi = 0
currentEvent = 0
for fileObj in jobFiles:
if fileObj["lfn"] in goldenFilesA:
goldenFilesA.remove(fileObj["lfn"])
elif fileObj["lfn"] in goldenFilesB:
goldenFilesB.remove(fileObj["lfn"])
elif fileObj["lfn"] in goldenFilesC:
goldenFilesC.remove(fileObj["lfn"])
fileRun = list(fileObj["runs"])[0].run
fileLumi = min(list(fileObj["runs"])[0])
fileEvent = fileObj["first_event"]
if currentRun == 0:
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
continue
assert fileRun >= currentRun, \
"ERROR: Files not sorted by run."
if fileRun == currentRun:
assert fileLumi >= currentLumi, \
"ERROR: Files not ordered by lumi"
if fileLumi == currentLumi:
assert fileEvent >= currentEvent, \
"ERROR: Files not ordered by first event"
currentRun = fileRun
currentLumi = fileLumi
currentEvent = fileEvent
assert len(goldenFilesB) == 0 and \
(len(goldenFilesA) == 0 or len(goldenFilesC) == 0), \
"ERROR: Files not allocated to jobs correctly."
return
def testParallelProcessing(self):
"""
_testParallelProcessing_
Verify that merging works correctly when multiple processing
subscriptions are run over the same input files. The merging algorithm
should ignore processing jobs that feed into different merge
subscriptions.
"""
locationAction = self.daoFactory(classname="Locations.New")
locationAction.execute(siteName="T2_CH_CERN", pnn="T2_CH_CERN")
locationAction.execute(siteName="T1_US_FNAL", pnn="T2_CH_CERN")
mergeFilesetA = Fileset(name="mergeFilesetA")
mergeFilesetB = Fileset(name="mergeFilesetB")
mergeFilesetA.create()
mergeFilesetB.create()
mergeMergedFilesetA = Fileset(name="mergeMergedFilesetA")
mergeMergedFilesetB = Fileset(name="mergeMergedFilesetB")
mergeMergedFilesetA.create()
mergeMergedFilesetB.create()
mergeWorkflow = Workflow(name="mergeWorkflow", spec="bogus",
owner="Steve", task="Test")
mergeWorkflow.create()
mergeSubscriptionA = Subscription(fileset=mergeFilesetA,
workflow=mergeWorkflow,
split_algo="WMBSMergeBySize")
mergeSubscriptionB = Subscription(fileset=mergeFilesetB,
workflow=mergeWorkflow,
split_algo="WMBSMergeBySize")
mergeSubscriptionA.create()
mergeSubscriptionB.create()
inputFileset = Fileset(name="inputFileset")
inputFileset.create()
inputFileA = File(lfn="inputLFNA")
inputFileB = File(lfn="inputLFNB")
inputFileA.create()
inputFileB.create()
procWorkflowA = Workflow(name="procWorkflowA", spec="bunk2",
owner="Steve", task="Test")
procWorkflowA.create()
procWorkflowA.addOutput("output", mergeFilesetA, mergeMergedFilesetA)
procWorkflowB = Workflow(name="procWorkflowB", spec="bunk3",
owner="Steve", task="Test2")
procWorkflowB.create()
procWorkflowB.addOutput("output", mergeFilesetB, mergeMergedFilesetB)
procSubscriptionA = Subscription(fileset=inputFileset,
workflow=procWorkflowA,
split_algo="EventBased")
procSubscriptionA.create()
procSubscriptionB = Subscription(fileset=inputFileset,
workflow=procWorkflowB,
split_algo="EventBased")
procSubscriptionB.create()
jobGroupA = JobGroup(subscription=procSubscriptionA)
jobGroupA.create()
jobGroupB = JobGroup(subscription=procSubscriptionB)
jobGroupB.create()
changeStateDAO = self.daoFactory(classname="Jobs.ChangeState")
testJobA = Job()
testJobA.addFile(inputFileA)
testJobA.create(jobGroupA)
testJobA["state"] = "cleanout"
testJobA["oldstate"] = "new"
testJobA["couch_record"] = "somejive"
testJobA["retry_count"] = 0
testJobA["outcome"] = "success"
testJobA.save()
testJobB = Job()
testJobB.addFile(inputFileB)
testJobB.create(jobGroupA)
testJobB["state"] = "cleanout"
testJobB["oldstate"] = "new"
testJobB["couch_record"] = "somejive"
testJobB["retry_count"] = 0
testJobB["outcome"] = "success"
testJobB.save()
testJobC = Job()
testJobC.addFile(inputFileA)
testJobC.create(jobGroupB)
testJobC["state"] = "cleanout"
testJobC["oldstate"] = "new"
testJobC["couch_record"] = "somejive"
testJobC["retry_count"] = 0
testJobC["outcome"] = "success"
testJobC.save()
testJobD = Job()
testJobD.addFile(inputFileA)
testJobD.create(jobGroupB)
testJobD["state"] = "cleanout"
testJobD["oldstate"] = "new"
testJobD["couch_record"] = "somejive"
testJobD["retry_count"] = 0
testJobD["outcome"] = "failure"
testJobD.save()
testJobE = Job()
testJobE.addFile(inputFileB)
testJobE.create(jobGroupB)
testJobE["state"] = "cleanout"
testJobE["oldstate"] = "new"
testJobE["couch_record"] = "somejive"
testJobE["retry_count"] = 0
testJobE["outcome"] = "success"
testJobE.save()
testJobF = Job()
testJobF.addFile(inputFileB)
testJobF.create(jobGroupB)
testJobF["state"] = "cleanout"
testJobF["oldstate"] = "new"
testJobF["couch_record"] = "somejive"
testJobF["retry_count"] = 0
testJobF["outcome"] = "failure"
testJobF.save()
changeStateDAO.execute([testJobA, testJobB, testJobC, testJobD,
testJobE, testJobF])
fileA = File(lfn="fileA", size=1024, events=1024, first_event=0,
locations={"T2_CH_CERN"})
fileA.addRun(Run(1, *[45]))
fileA.create()
fileA.addParent(inputFileA["lfn"])
fileB = File(lfn="fileB", size=1024, events=1024, first_event=0,
locations={"T2_CH_CERN"})
fileB.addRun(Run(1, *[45]))
fileB.create()
fileB.addParent(inputFileB["lfn"])
jobGroupA.output.addFile(fileA)
jobGroupA.output.addFile(fileB)
jobGroupA.output.commit()
mergeFilesetA.addFile(fileA)
mergeFilesetA.addFile(fileB)
mergeFilesetA.commit()
fileC = File(lfn="fileC", size=1024, events=1024, first_event=0,
locations={"T2_CH_CERN"})
fileC.addRun(Run(1, *[45]))
fileC.create()
fileC.addParent(inputFileA["lfn"])
fileD = File(lfn="fileD", size=1024, events=1024, first_event=0,
locations={"T2_CH_CERN"})
fileD.addRun(Run(1, *[45]))
fileD.create()
fileD.addParent(inputFileB["lfn"])
jobGroupB.output.addFile(fileC)
jobGroupB.output.addFile(fileD)
mergeFilesetB.addFile(fileC)
mergeFilesetB.addFile(fileD)
mergeFilesetB.commit()
splitter = SplitterFactory()
jobFactory = splitter(package="WMCore.WMBS",
subscription=mergeSubscriptionB)
result = jobFactory(min_merge_size=1, max_merge_size=20000,
max_merge_events=7169)
assert len(result) == 0, \
"Error: No merge jobs should have been created."
fileE = File(lfn="fileE", size=1024, events=1024, first_event=0,
locations={"T2_CH_CERN"})
fileE.addRun(Run(1, *[45]))
fileE.create()
fileE.addParent(inputFileA["lfn"])
fileF = File(lfn="fileF", size=1024, events=1024, first_event=0,
locations={"T2_CH_CERN"})
fileF.addRun(Run(1, *[45]))
fileF.create()
fileF.addParent(inputFileB["lfn"])
jobGroupB.output.addFile(fileE)
jobGroupB.output.addFile(fileF)
mergeFilesetB.addFile(fileE)
mergeFilesetB.addFile(fileF)
mergeFilesetB.commit()
testJobD["outcome"] = "success"
testJobD.save()
testJobF["outcome"] = "success"
testJobF.save()
changeStateDAO.execute([testJobD, testJobF])
result = jobFactory(min_merge_size=1, max_merge_size=20000,
max_merge_events=7169)
assert len(result) == 1, \
"Error: One merge job should have been created: %s" % len(result)
return
def testLocationMerging(self):
"""
_testLocationMerging_
Verify that files residing on different SEs are not merged together in
the same job.
"""
self.stuffWMBS()
locationAction = self.daoFactory(classname="Locations.New")
locationAction.execute(siteName="T1_UK_RAL", pnn="T1_UK_RAL_Disk")
fileSite2 = File(lfn="fileSite2", size=4098, events=1024,
first_event=0, locations={"T1_UK_RAL_Disk"})
fileSite2.addRun(Run(1, *[46]))
fileSite2.create()
fileSite2.addParent(self.parentFileSite2["lfn"])
self.mergeFileset.addFile(fileSite2)
self.mergeFileset.commit()
splitter = SplitterFactory()
jobFactory = splitter(package="WMCore.WMBS",
subscription=self.mergeSubscription)
result = jobFactory(min_merge_size=4097, max_merge_size=99999999,
max_merge_events=999999999)
<reponame>jensguballa/andyBee<gh_stars>1-10
from lxml import etree
from app import geocache_db
from geocache_model_sql import Cache, Cacher, CacheType, CacheContainer, CacheCountry, CacheState, CacheToAttribute, Waypoint, WaypointSym, WaypointType, Log, LogType, Attribute, UserNote
from geocache import Geocache
from db import DbInterface
import re
import datetime
import time
import calendar
from dateutil.parser import parse
GPX_NS = "http://www.topografix.com/GPX/1/0"
GPX = "{%s}" % GPX_NS
GS_NS = "http://www.groundspeak.com/cache/1/0/1"
GS = "{%s}" % GS_NS
XSI_NS = "http://www.w3.org/2001/XMLSchema-instance"
XSI = "{%s}" % XSI_NS
latmin = 0
latmax = 0
lonmin = 0
lonmax = 0
deleted_wpt = {}
log_pool = {}
cacher_pool = None
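# Formats a signed decimal coordinate as degrees and decimal minutes, e.g. 51.5 with ('N', 'S') gives "N 51 30.000";
# str1/str2 are the prefixes used for positive/negative values.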
def coords_to_string(coord, str1, str2):
string = str1
if coord < 0:
coord = -coord
string = str2
degrees = int(coord)
string += ' ' + str(degrees) + ' ' + '%.3f' % ((coord - degrees) * 60)
return string
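# Writes one waypoint as a GPX <wpt> element; for the cache's own waypoint it uses corrected
# coordinates when present and updates the bounding box kept in data.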
def wpt_to_xml(parent, waypoint, geocache, data):
w_wpt = None
lat = waypoint['lat']
lon = waypoint['lon']
if waypoint['name'] == waypoint['gc_code']:
if geocache['coords_updated']:
lat = geocache['corr_lat']
lon = geocache['corr_lon']
data['latmin'] = min(data['latmin'], lat)
data['latmax'] = max(data['latmax'], lat)
data['lonmin'] = min(data['lonmin'], lon)
data['lonmax'] = max(data['lonmax'], lon)
w_wpt = subnode(parent, GPX+"wpt", attrib={'lat': str(lat), 'lon': str(lon)})
subnode(w_wpt, GPX+"time", text=waypoint['time'])
subnode(w_wpt, GPX+"name", text=waypoint['name'])
subnode(w_wpt, GPX+"cmt", text=waypoint['cmt'])
subnode(w_wpt, GPX+"desc", text=waypoint['descr'])
subnode(w_wpt, GPX+"url", text=waypoint['url'])
subnode(w_wpt, GPX+"urlname", text=waypoint['urlname'])
subnode(w_wpt, GPX+"sym", text=waypoint['sym'])
subnode(w_wpt, GPX+"type", text=waypoint['type'])
return w_wpt
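# Serializes a full geocache (main waypoint, groundspeak cache extension with attributes and logs,
# and optionally the additional waypoints) into the GPX tree.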
def geocache_to_xml(parent, geocache, data):
wpt_node = None
print "DB01", geocache['waypoints']
for waypoint in geocache['waypoints']:
if waypoint['name'] == waypoint['gc_code']:
wpt_node = wpt_to_xml(parent, waypoint, geocache, data)
cache_node = subnode(wpt_node, GS+"cache", nsmap={'groundspeak':GS_NS},
attrib={
'id': str(geocache['id']),
'available': "True" if geocache['available'] else "False",
'archived': "True" if geocache['archived'] else "False"})
subnode(cache_node, GS+"name", text=geocache['name'])
subnode(cache_node, GS+"placed_by", text=geocache['placed_by'])
subnode(cache_node, GS+"owner", text=geocache['owner'], attrib={'id': str(geocache['owner_id'])})
subnode(cache_node, GS+"type", text=geocache['type'])
subnode(cache_node, GS+"container", text=geocache['container'])
if len(geocache['attributes']):
attr_node = subnode(cache_node, GS+"attributes")
for attribute in geocache['attributes']:
subnode(attr_node, GS+"attribute", text=attribute['name'],
attrib={
'id': str(attribute['gc_id']),
'inc': "1" if attribute['inc'] else "0"})
subnode(cache_node, GS+"difficulty", text=re.sub('\.0','', str(geocache['difficulty'])))
subnode(cache_node, GS+"terrain", text=re.sub('\.0','',str(geocache['terrain'])))
subnode(cache_node, GS+"country", text=geocache['country'])
subnode(cache_node, GS+"state", text=geocache['state'])
subnode(cache_node, GS+"short_description", text=geocache['short_desc'],
attrib={'html': "True" if geocache['short_html'] else "False"})
orig_coords_txt = ''
if geocache['coords_updated']:
orig_coords_txt = 'Original coordinates: ' + coords_to_string(geocache['lat'], 'N', 'S') + ' ' + coords_to_string(geocache['lon'], 'E', 'W')
if geocache['long_html']:
orig_coords_txt = '<p>' + orig_coords_txt + '</p>'
user_note = ''
if geocache['note_present']:
note = geocache_db.get_by_id(UserNote, geocache['id'])
user_note = note['note']
if geocache['long_html']:
user_note = '<div>' + user_note.replace("\n", "<br />") + '</div>'
subnode(cache_node, GS+"long_description", text=geocache['long_desc'] + orig_coords_txt + user_note,
attrib={'html': "True" if geocache['long_html'] else "False"})
subnode(cache_node, GS+"encoded_hints", text=geocache['encoded_hints'])
if len(geocache['logs']) and (data['max_logs'] > 0):
sort_logs = sorted(geocache['logs'], key=lambda log: log['date'])
logs_node = subnode(cache_node, GS+"logs")
for log in sort_logs[0:data['max_logs']]:
log_node = subnode(logs_node, GS+"log", attrib={'id': str(log['id'])})
subnode(log_node, GS+"date", text=log['date'])
subnode(log_node, GS+"type", text=log['type'])
subnode(log_node, GS+"finder", text=log['finder'], attrib={'id': str(log['finder_id'])})
subnode(log_node, GS+"text", text=log['text'], attrib={'encoded': 'True' if log['text_encoded'] else 'False'})
if data['waypoints']:
for waypoint in geocache['waypoints']:
if waypoint['name'] != waypoint['gc_code']:  # only the additional waypoints; the cache waypoint was already exported above
wpt_to_xml(parent, waypoint, geocache, data)
def subnode(parent, tag_name, text=None, attrib=None, nsmap=None):
node = etree.SubElement(parent, tag_name, nsmap=nsmap)
if text is not None:
node.text = text
if attrib is not None:
for name, val in attrib.iteritems():
node.attrib[name] = val
return node
def export_gpx(data):
data['latmin'] = 1000.0
data['latmax'] = -1000.0
data['lonmin'] = 1000.0
data['lonmax'] = -1000.0
root = etree.Element(GPX+"gpx", nsmap={None:GPX_NS, "xsi":XSI_NS})
root.attrib["version"] = "1.0"
root.attrib["creator"] = "geodb, all rights reserved"
root.attrib[XSI+"schemaLocation"] = "{} {}/gpx.xsd {} {}/cache.xsd".format(GPX_NS,GPX_NS,GS_NS,GS_NS)
subnode(root, GPX+"name" , text="Cache Listing Generated by andyBee")
subnode(root, GPX+"desc" , text="This is an individual list of geocaches generated by andyBee.")
subnode(root, GPX+"author" , text="Hi, it's me: <NAME>")
subnode(root, GPX+"email" , text="<EMAIL>")
subnode(root, GPX+"url" , text="http://www.guballa.de")
subnode(root, GPX+"urlname", text="Geocaching. What else?")
subnode(root, GPX+"time" , text=datetime.datetime.now().isoformat())
subnode(root, GPX+"keyword", text="cache, geocache")
bounds = subnode(root, GPX+"bounds")
for id in data['list']:
geocache = Geocache(id, geocache_db).fetch_singular()
geocache_to_xml(root, geocache.get_data(), data)
bounds.attrib['minlat'] = str(data['latmin'])
bounds.attrib['minlon'] = str(data['lonmin'])
bounds.attrib['maxlat'] = str(data['latmax'])
bounds.attrib['maxlon'] = str(data['lonmax'])
et = etree.ElementTree(root)
return etree.tostring(et, pretty_print=True, encoding="UTF-8", xml_declaration=True)
class GpxImporter():
def __init__(self, geocache_db, max_logs, pref_owner):
self.waypoint_itf = DbInterface(geocache_db, Waypoint)
self.waypoint_sym_itf = DbInterface(geocache_db, WaypointSym)
self.waypoint_type_itf = DbInterface(geocache_db, WaypointType)
self.cache_itf = DbInterface(geocache_db, Cache)
self.cache_type_itf = DbInterface(geocache_db, CacheType)
self.cache_state_itf = DbInterface(geocache_db, CacheState)
self.cache_country_itf = DbInterface(geocache_db, CacheCountry)
self.cache_container_itf = DbInterface(geocache_db, CacheContainer)
self.cache_to_attribute_itf = DbInterface(geocache_db, CacheToAttribute)
self.cacher_itf = CacherInterface(geocache_db, Cacher)
self.log_type_itf = DbInterface(geocache_db, LogType)
self.log_itf = LogInterface(geocache_db, Log)
self.db = geocache_db
self.deleted_wpt = {}
self.max_logs = max_logs
self.pref_owner = pref_owner
self.last_updated = 0
def import_gpx(self, gpx_file):
try:
start = time.time()
tree = etree.parse(gpx_file)
end = time.time()
except:
return
gpx = tree.getroot()
if gpx.tag == GPX+"gpx":
# First, parse all the common elements
for node in gpx:
if node.tag == GPX+"time":
self.last_updated = calendar.timegm(parse(node.text).utctimetuple())
break
# Second, parse all waypoints
for node in gpx:
if node.tag == GPX+"wpt":
wpt = self._parse_wpt(node)
self._merge_wpt(wpt)
geocache_db.execute('''UPDATE waypoint
SET cache_id = (SELECT cache.id FROM cache WHERE cache.gc_code = waypoint.gc_code)
WHERE cache_id IS NULL''')
self.db.commit()
def _parse_wpt(self, node):
wpt = Waypoint()
wpt.cache = None
wpt.db['lat'] = float(node.get("lat"))
wpt.db['lon'] = float(node.get("lon"))
for child in node:
if child.tag == GPX+"time":
wpt.db['time'] = child.text
elif child.tag == GPX+"name":
wpt.db['name'] = child.text
wpt.db['gc_code'] = re.sub('^..', 'GC', child.text)
elif child.tag == GPX+"desc":
wpt.db['descr'] = child.text
elif child.tag == GPX+"url":
wpt.db['url'] = child.text
elif child.tag == GPX+"urlname":
wpt.db['urlname'] = child.text
elif child.tag == GPX+"sym":
wpt.sym = child.text
wpt.db['sym_id'] = self.waypoint_sym_itf.create_singleton_value('name', child.text)
elif child.tag == GPX+"type":
#wpt.db['type_id'] = geocache_db.create_singleton_id(WaypointType, {'name': child.text})
wpt.db['type_id'] = self.waypoint_type_itf.create_singleton_value('name', child.text)
elif child.tag == GPX+"cmt":
wpt.db['cmt'] = child.text
elif child.tag == GS+"cache":
wpt.cache = self._parse_cache(child)
wpt.db['cache_id'] = wpt.cache.db['id']
if wpt.cache is not None:
# copy some values from the waypoint, so that join statements
# can be avoided
wpt.cache.db['hidden'] = wpt.db['time']
wpt.cache.db['lat'] = wpt.db['lat']
wpt.cache.db['lon'] = wpt.db['lon']
wpt.cache.db['gc_code'] = wpt.db['name']
wpt.cache.db['url'] = wpt.db['url']
wpt.cache.db['found'] = (wpt.sym == 'Geocache Found')
return wpt
def _parse_cache(self, node):
cache = Cache()
cache.db['last_updated'] = self.last_updated
cache.db['id'] = int(node.get("id"))
cache.db['available'] = (node.get("available") == "True")
cache.db['archived'] = (node.get("archived") == "True")
for child in node:
if child.tag == GS+"name":
cache.db['name'] = child.text
elif child.tag == GS+"placed_by":
cache.db['placed_by'] = child.text
elif child.tag == GS+"owner":
owner_id = int(child.get("id"))
self.cacher_itf.create_singleton(owner_id, child.text)
cache.db['owner_id'] = owner_id
# geocache_db.create_singleton_id(Cacher, {'id': child.get("id") , 'name': child.text})
elif child.tag == GS+"type":
#cache.db['type_id'] = geocache_db.create_singleton_id(CacheType, {'name': child.text})
cache.db['type_id'] = self.cache_type_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"container":
#cache.db['container_id'] = geocache_db.create_singleton_id(CacheContainer, {'name': child.text})
cache.db['container_id'] = self.cache_container_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"difficulty":
cache.db['difficulty'] = float(child.text)
elif child.tag == GS+"terrain":
cache.db['terrain'] = float(child.text)
elif child.tag == GS+"country":
#cache.db['country_id'] = geocache_db.create_singleton_id(CacheCountry, {'name': child.text})
cache.db['country_id'] = self.cache_country_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"state":
#cache.db['state_id'] = geocache_db.create_singleton_id(CacheState, {'name': child.text})
cache.db['state_id'] = self.cache_state_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"short_description":
cache.db['short_desc'] = child.text
cache.db['short_html'] = (child.get("html") == "True")
elif child.tag == GS+"long_description":
cache.db['long_desc'] = child.text
cache.db['long_html'] = (child.get("html") == "True")
elif child.tag == GS+"encoded_hints":
cache.db['encoded_hints'] = child.text
elif child.tag == GS+"attributes":
cache.attributes = []
for node_attr in child:
if node_attr.tag == GS+"attribute":
cache.attributes.append(self._parse_attribute(node_attr))
elif child.tag == GS+"logs":
cache.logs = []
for node_log in child:
if node_log.tag == GS+"log":
cache.logs.append(self._parse_log(node_log, cache.db['id']))
return cache
def _parse_attribute(self, node):
attr = Attribute()
attr.db['gc_id'] = int(node.get("id"))
attr.db['inc'] = (node.get("inc") == "1")
attr.db['name'] = node.text
return attr
def _parse_log(self, node, cache_id):
log = Log()
log.db['id'] = int(node.get("id"))
log.db['cache_id'] = cache_id
for log_node in node:
if log_node.tag == GS+"date":
log.db['date'] = log_node.text
elif log_node.tag == GS+"type":
#log.db['type_id'] = geocache_db.create_singleton_id(LogType, {'name': log_node.text})
log.db['type_id'] = self.log_type_itf.create_singleton_value('name', log_node.text)
elif log_node.tag == GS+"finder":
log.db['finder_id'] = int(log_node.get("id"))
log.finder = log_node.text
elif log_node.tag == GS+"text":
log.db['text'] = log_node.text
log.db['text_encoded'] = (log_node.get("encoded") == "True")
elif log_node.tag == GS+"log_wpt":
log.db['lat'] = float(log_node.get("lat"))
log.db['lon'] = float(log_node.get("lon"))
return log
def _merge_wpt(self, wpt):
gc_code = wpt.db['gc_code']
#cache_exists = geocache_db.get_singleton_id(Cache, {'gc_code': gc_code}) != None
cache_exists = self.cache_itf.get_id('gc_code', gc_code) is not None
if cache_exists:
if gc_code == wpt.db['name']: # waypoint for the cache itself
geocache_db.execute('DELETE FROM waypoint WHERE gc_code = ? AND name = ?', (gc_code, gc_code))
else: # additional waypoint
if gc_code not in self.deleted_wpt:
geocache_db.execute('DELETE FROM waypoint WHERE gc_code = ? AND name != ?', (gc_code, gc_code))
self.deleted_wpt[gc_code] = True
self.waypoint_itf.insert(wpt.db)
if wpt.cache is not None:
self._merge_cache(wpt.cache, cache_exists)
def _merge_cache(self, cache, cache_exists):
last_logs = self._merge_logs(cache.logs, cache.db['id'])
cache.db['last_logs'] = last_logs
if cache_exists:
self.cache_itf.update(cache.db['id'], cache.db)
else:
self.cache_itf.insert(cache.db)
self._merge_attributes(cache.attributes, cache.db['id'], cache_exists)
def _merge_logs(self, logs, cache_id):
db_logs = self.log_itf.get_cache_logs(cache_id)
merged_array = []
for log in logs:
if log.db['id'] in db_logs:
del db_logs[log.db['id']]
merged_array.append({'id': log.db['id'], 'date': log.db['date'], 'finder': log.finder, 'type_id': log.db['type_id'], 'action': 'update', 'db': log.db})
else:
merged_array.append({'id': log.db['id'], 'date': log.db['date'], | |
import pandas as pd
import sys, os
from collections import OrderedDict
from viola.core.bedpe import Bedpe
from viola.core.vcf import Vcf
from typing import (
List,
Optional,
)
class MultiBedpe(Bedpe):
"""
A database-like object that contains information from multiple BEDPE files.
In this class, the main keys in most tables are "global id" values instead of
the "SV id" assigned by the SV callers. A "global id" is a unique ID for every
SV record across all samples.
"""
_internal_attrs = [
"_df_id",
"_df_patients",
"_df_svpos",
"_odict_df_info",
"_ls_patients",
"_ls_infokeys",
"_odict_alltables",
"_repr_config",
"_sig_criteria"
]
_internal_attrs_set = set(_internal_attrs)
_repr_column_names = [
"id",
"bp1",
"bp2",
"strand",
"qual",
"svtype",
]
_repr_column_names_set = set(_repr_column_names)
def __init__(
self,
ls_bedpe: List[Bedpe] = None,
ls_patient_names: List[str] = None,
direct_tables: Optional[List[pd.DataFrame]] = None
):
if direct_tables is None:
df_id, df_patients, df_svpos, odict_df_info = self.__init__from_ls_bedpe(ls_bedpe, ls_patient_names)
self.__init__common(df_id, df_patients, df_svpos, odict_df_info)
else:
self.__init__common(*direct_tables)
def __init__from_ls_bedpe(self, ls_bedpe, ls_patient_names):
ls_df_id = []
ls_df_svpos = []
dict_ls_df_info = dict()
ls_patient_id = [i for i in range(len(ls_patient_names))]
df_patients = pd.DataFrame({'id': ls_patient_id, 'patients': ls_patient_names})
for bedpe, patient_id, patient_name in zip(ls_bedpe, ls_patient_id, ls_patient_names):
df_svpos = bedpe.get_table('positions')
df_id = df_svpos[['id']].copy()
df_id['patient_id'] = patient_id
df_id['global_id'] = str(patient_name) + '_' + df_id['id'].astype(str)
df_id = df_id[['global_id', 'patient_id', 'id']]
ls_df_id.append(df_id)
df_svpos['id'] = str(patient_name) + '_' + df_svpos['id'].astype(str)
ls_df_svpos.append(df_svpos)
for key, value in bedpe._odict_df_info.items():
value = value.copy()
value['id'] = str(patient_name) + '_' + value['id'].astype(str)
if dict_ls_df_info.get(key) is None:
dict_ls_df_info[key] = [value]
else:
dict_ls_df_info[key].append(value)
df_concat_id = pd.concat(ls_df_id, ignore_index=True)
df_concat_svpos = pd.concat(ls_df_svpos, ignore_index=True)
odict_df_info = OrderedDict()
for key, value in dict_ls_df_info.items():
odict_df_info[key] = pd.concat(value)
return (df_concat_id, df_patients, df_concat_svpos, odict_df_info)
def __init__common(self, df_id, df_patients, df_svpos, odict_df_info):
self._df_id = df_id
self._df_patients = df_patients
self._ls_patients = df_patients['patients'].to_list()
self._df_svpos = df_svpos
self._odict_df_info = odict_df_info
self._ls_infokeys = [x.lower() for x in odict_df_info.keys()]
ls_keys = ['global_id', 'patients', 'positions'] + self._ls_infokeys
ls_values = [df_id, df_patients, df_svpos] + list(odict_df_info.values())
self._odict_alltables = OrderedDict([(k, v) for k, v in zip(ls_keys, ls_values)])
self._repr_config = {
'info': None,
}
def filter_by_id(self, arrlike_id):
"""
filter_by_id(arrlike_id)
Filter the MultiBedpe object according to a list of global SV ids.
The returned object is also an instance of MultiBedpe.
Parameters
---------------
arrlike_id: list-like
Global ids which you would like to keep.
Returns
---------------
MultiBedpe
A MultiBedpe object restricted to the SV ids specified in the arrlike_id argument.
All records associated with SV ids not listed in arrlike_id are discarded.
"""
df_global_id = self.get_table('global_id')
out_global_id = df_global_id.loc[df_global_id['global_id'].isin(arrlike_id)].reset_index(drop=True)
out_patients = self.get_table('patients')
out_svpos = self._filter_by_id('positions', arrlike_id)
out_odict_df_info = OrderedDict([(k, self._filter_by_id(k, arrlike_id)) for k in self._ls_infokeys])
return MultiBedpe(direct_tables=[out_global_id, out_patients, out_svpos, out_odict_df_info])
def classify_manual_svtype(self, definitions=None, ls_conditions=None, ls_names=None, ls_order=None, return_data_frame=True, exclude_empty_cases=False):
"""
classify_manual_svtype(definitions=None, ls_conditions=None, ls_names=None, ls_order=None, return_data_frame=True, exclude_empty_cases=False)
Classify SV records by user-defined criteria. A new INFO table named
'manual_sv_type' will be created.
Parameters
------------
definitions: path_or_buf or str, default None
Path to the file which specifies the definitions of custom SV classification. This argument is disabled when "ls_condition" is not None.
If "default" is specified, the simple length-based SV classification will be employed.
If "article" is specified, the same definition file which was used in the Viola publication will be reflected.
Below are the links to the definition files you can specify with this method.
"default" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_default.txt
"article" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_article.txt
ls_conditions: List[callable] or List[str], default None
List of definitions of the custom SV classification. Each element of the list can be a callable or a list of SV IDs (str).
callable --> A function that takes the object itself and returns a list of SV IDs satisfying the conditions of the SV class to be defined.
SV ID --> A list of SV IDs that satisfy the conditions of the SV class to be defined.
This argument is disabled when "definitions" is not None.
ls_names: List[str], default None
List of the names of the custom SV class corresponding to the "ls_conditions". This argument is disabled when "definitions" is not None.
return_data_frame: bool, default True
If True, return the counts of each custom SV class for each patient as a pd.DataFrame.
exclude_empty_cases: bool, default False
If True, samples which have no SV record will be excluded.
Returns
---------
pd.DataFrame or None
"""
set_ids_current = set(self.ids)
obj = self
ls_ids = []
ls_result_names = []
if definitions is not None:
if isinstance(definitions, str):
if definitions == "default":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_default.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
elif definitions == "article":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_article.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(definitions)
for cond, name in zip(ls_conditions, ls_names):
obj = obj.filter_by_id(set_ids_current)
if callable(cond):
ids = cond(obj)
else:
ids = cond
set_ids = set(ids)
set_ids_intersection = set_ids_current & set_ids
ls_ids += list(set_ids_intersection)
ls_result_names += [name for i in range(len(set_ids_intersection))]
set_ids_current = set_ids_current - set_ids_intersection
ls_ids += list(set_ids_current)
ls_result_names += ['others' for i in range(len(set_ids_current))]
ls_zeros = [0 for i in range(len(self.ids))]
df_result = pd.DataFrame({'id': ls_ids, 'value_idx': ls_zeros, 'manual_sv_type': ls_result_names})
self.add_info_table('manual_sv_type', df_result)
if return_data_frame:
if ls_order is None:
pd_ind_reindex = pd.Index(ls_names + ['others'])
else:
pd_ind_reindex = pd.Index(ls_order)
df_feature_counts = self.get_feature_count_as_data_frame(ls_order=pd_ind_reindex, exclude_empty_cases=exclude_empty_cases)
return df_feature_counts
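# Minimal usage sketch (added for illustration; `multi_bedpe` and the rule
# below are hypothetical, not from the original source):
#
#   is_any = lambda obj: list(obj.ids)          # any rule returning global ids
#   counts = multi_bedpe.classify_manual_svtype(
#       ls_conditions=[is_any], ls_names=['my_class'])
#
# Records matched by no condition are labeled 'others', and the returned
# DataFrame holds the per-patient counts of each class.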
def get_feature_count_as_data_frame(self, feature='manual_sv_type', ls_order=None, exclude_empty_cases=False):
df_feature = self.get_table(feature)
df_id = self.get_table('global_id')
df_patients = self.get_table('patients')
df_merged = pd.merge(df_feature, df_id, left_on='id', right_on='global_id')
df_merged = df_merged.merge(df_patients, left_on='patient_id', right_on='id')
df_feature_counts = df_merged.pivot_table('global_id', index='patients', columns=feature, aggfunc='count', fill_value=0)
if not exclude_empty_cases:
df_feature_counts = df_feature_counts.reindex(self._ls_patients, fill_value=0)
if ls_order is not None:
pd_ind_reindex = pd.Index(ls_order, name=feature)
df_feature_counts = df_feature_counts.reindex(columns=pd_ind_reindex, fill_value=0)
return df_feature_counts
class MultiVcf(Vcf):
"""
A database-like object that contains information from multiple VCF files.
In this class, the main keys in most tables are "global id" values instead of
the "SV id" assigned by the SV callers. A "global id" is a unique ID for every
SV record across all samples.
"""
_internal_attrs = [
"_df_id",
"_df_patients",
"_df_svpos",
"_odict_df_info",
"_ls_patients",
"_ls_infokeys",
"_odict_alltables",
"_repr_config",
"_sig_criteria"
]
_internal_attrs_set = set(_internal_attrs)
_repr_column_names = [
"id",
"bp1",
"bp2",
"strand",
"qual",
"svtype",
]
_repr_column_names_set = set(_repr_column_names)
def __init__(
self,
ls_vcf: List[Vcf] = None,
ls_patient_names: List[str] = None,
direct_tables: Optional[List[pd.DataFrame]] = None
):
if direct_tables is None:
df_id, df_patients, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers = self.__init__from_ls_vcf(ls_vcf, ls_patient_names)
self.__init__common(df_id, df_patients, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers)
else:
self.__init__common(*direct_tables)
def __init__from_ls_vcf(self, ls_vcf, ls_patient_names):
ls_df_id = []
ls_df_svpos = []
ls_df_filters = []
odict_ls_df_info = OrderedDict()
ls_df_formats = []
odict_ls_df_headers = OrderedDict()
# Header Integration
for vcf, patient_name in zip(ls_vcf, ls_patient_names):
for key, value in vcf._odict_df_headers.items():
value = value.copy()
if odict_ls_df_headers.get(key) is None:
odict_ls_df_headers[key] = [value]
else:
odict_ls_df_headers[key].append(value)
odict_df_headers = OrderedDict()
for key, value in odict_ls_df_headers.items():
for idx, df in enumerate(value):
if idx == 0:
df_merged = df
continue
on = list(df_merged.columns)
df_merged = df_merged.merge(df, how='outer', on=on)
odict_df_headers[key] = df_merged
# /Header Integration
ls_patient_id = [i for i in range(len(ls_patient_names))]
df_patients = pd.DataFrame({'id': ls_patient_id, 'patients': ls_patient_names})
for vcf, patient_id, patient_name in zip(ls_vcf, ls_patient_id, ls_patient_names):
df_svpos = vcf.get_table('positions')
df_filters = vcf.get_table('filters')
df_formats = vcf.get_table('formats')
df_id = df_svpos[['id']].copy()
df_id['patient_id'] = patient_id
df_id['global_id'] = str(patient_name) + '_' + df_id['id'].astype(str)
df_id = df_id[['global_id', 'patient_id', 'id']]
ls_df_id.append(df_id)
df_svpos['id'] = str(patient_name) + '_' + df_svpos['id'].astype(str)
ls_df_svpos.append(df_svpos)
df_filters['id'] = str(patient_name) + '_' + df_filters['id'].astype(str)
ls_df_filters.append(df_filters)
df_formats['id'] = str(patient_name) + '_' + df_formats['id'].astype(str)
ls_df_formats.append(df_formats)
for info in odict_df_headers['infos_meta'].id:
df_info_ = vcf._odict_df_info.get(info, None)
if df_info_ is None:
df_info = pd.DataFrame(columns=('id', 'value_idx', info.lower()))
else:
df_info = df_info_.copy()
df_info['id'] = str(patient_name) + '_' + df_info['id'].astype(str)
if odict_ls_df_info.get(info) is None:
odict_ls_df_info[info] = [df_info]
else:
odict_ls_df_info[info].append(df_info)
df_concat_id = pd.concat(ls_df_id, ignore_index=True)
df_concat_svpos = pd.concat(ls_df_svpos, ignore_index=True)
df_concat_filters = pd.concat(ls_df_filters, ignore_index=True)
df_concat_formats = pd.concat(ls_df_formats, ignore_index=True)
odict_df_info = OrderedDict()
for key, value in odict_ls_df_info.items():
odict_df_info[key] = pd.concat(value)
return (df_concat_id, df_patients, df_concat_svpos, df_concat_filters, odict_df_info, df_concat_formats, odict_df_headers)
def __init__common(self, df_id, df_patients, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers = {}):
self._df_id = df_id
self._df_patients = df_patients
self._df_svpos = df_svpos
self._df_filters = df_filters
self._odict_df_info = odict_df_info
self._df_formats = df_formats
self._odict_df_headers = odict_df_headers
self._ls_patients = df_patients['patients'].to_list()
self._ls_infokeys = [ x.lower() for x in odict_df_headers['infos_meta']['id'].tolist()]
ls_keys = ['global_id', 'patients', 'positions', 'filters'] + self._ls_infokeys + ['formats'] + \
list(odict_df_headers.keys())
ls_values = [df_id, df_patients, df_svpos, df_filters] + list(odict_df_info.values()) + [df_formats] | |
range(self.num_base):
pos = self.atom_positions[a1]
neighbors, distances = tree.query(pos, num_jobs, self.DIST_DECIMALS,
include_zero=True, compact=False)
neighbor_indices = indices[neighbors]
# Store neighbors of certain distance for each atom pair in the unit cell
for dist, idx in zip(distances, neighbor_indices):
a2 = idx[-1]
if dist:
neighbor_array[a1][a2].setdefault(dist, list()).append(idx)
# Remove extra neighbors
for a1, a2 in itertools.product(range(n), repeat=2):
neighbors = neighbor_array[a1][a2]
dists = list(sorted(neighbors.keys()))
for dist in dists[:max_distidx]:
neighbor_array[a1][a2][dist] = np.array(neighbors[dist])
for dist in dists[max_distidx:]:
del neighbor_array[a1][a2][dist]
return neighbor_array
def _analyze_raw(self, max_distidx):
"""Analyzes the structure of the raw lattice (without connections)."""
n = self.num_base
# Compute raw neighbors of unitcell
neighbor_array = self._compute_base_neighbors(max_distidx)
# Compute the raw distance matrix and the raw number of neighbors
raw_distance_matrix = [[list() for _ in range(n)] for _ in range(n)]
raw_num_neighbors = np.zeros((n, n), dtype=np.int64)
for a1, a2 in itertools.product(range(n), repeat=2):
neighbors = neighbor_array[a1][a2]
raw_distance_matrix[a1][a2] += list(neighbors.keys())
raw_num_neighbors[a1, a2] = sum(len(x) for x in neighbors.values())
# Save raw neighbor data of the unitcell
self._raw_base_neighbors = neighbor_array
self._raw_distance_matrix = raw_distance_matrix
self._raw_num_neighbors = raw_num_neighbors
logger.debug("Number of raw neighbors:\n%s", raw_num_neighbors)
logger.debug("Raw distance-matrix:\n%s", raw_distance_matrix)
def analyze(self) -> None:
"""Analyzes the structure of the lattice and stores neighbor data of the unitcell.
Checks the distances between all sites of the Bravais lattice and saves the n lowest values.
The neighbor lattice-indices of the unit-cell are also stored for later use.
This speeds up many calculations like finding nearest neighbors.
Raises
------
NoAtomsError
Raised if no atoms were added to the lattice. The atoms in the unit cell are needed
for computing the neighbors and distances of the lattice.
"""
logger.debug("Analyzing lattice")
if len(self._atoms) == 0:
raise NoAtomsError()
max_distidx = int(np.max(self._connections))
n = self.num_base
# Analyze the raw lattice
self._analyze_raw(max_distidx)
# Filter base neighbor data for configured connections and
# store neighbors and distances as list for each atom
base_neighbors = [collections.OrderedDict() for _ in range(n)]
base_distance_matrix = [[list() for _ in range(n)] for _ in range(n)]
unique_distances = set()
for a1, a2 in itertools.product(range(n), repeat=2):
neighbors = self._raw_base_neighbors[a1][a2]
dists = list(neighbors.keys())
max_dist = self._connections[a1, a2]
for distidx, dist in enumerate(dists[:max_dist]):
unique_distances.add(dist)
base_neighbors[a1].setdefault(dist, list()).extend(neighbors[dist])
base_distance_matrix[a1][a2].append(dist)
base_distance_matrix[a1][a2] = list(sorted(base_distance_matrix[a1][a2]))
# Convert base neighbors back to np.ndarray
for a1 in range(self.num_base):
for key, vals in base_neighbors[a1].items():
base_neighbors[a1][key] = np.asarray(vals)
max_num_distances = len(unique_distances)
# Compute number of neighbors for each atom in the unit cell
num_neighbors = np.zeros(self.num_base, dtype=np.int8)
for i, neighbors in enumerate(base_neighbors):
num_neighbors[i] = sum(len(indices) for indices in neighbors.values())
# store distance values / keys:
distances = np.zeros((self.num_base, max_num_distances))
for alpha in range(self.num_base):
try:
dists = list(base_neighbors[alpha].keys())
except ValueError:
dists = list()
distances[alpha, :len(dists)] = sorted(dists)
self._base_neighbors = base_neighbors
self._distance_matrix = base_distance_matrix
self._num_neighbors = num_neighbors
self._distances = distances
logger.debug("Number of neighbors:\n%s", num_neighbors)
logger.debug("Distance-matrix:\n%s", base_distance_matrix)
logger.debug("Distances:\n%s", distances)
def get_position(self, nvec: Optional[Union[int, Sequence[int]]] = None,
alpha: Optional[int] = 0) -> np.ndarray:
""" Returns the position for a given translation vector and site index
Parameters
----------
nvec: (N) array_like or int
translation vector.
alpha: int, optional
site index, default is 0.
Returns
-------
pos: (N) np.ndarray
"""
r = self._positions[alpha]
if nvec is None:
return r
n = np.atleast_1d(nvec)
return r + (self._vectors @ n) # self.translate(n, r)
def get_positions(self, indices):
"""Returns the positions for multiple lattice indices
Parameters
----------
indices: (N, D+1) array_like or int
List of lattice indices.
Returns
-------
pos: (N, D) np.ndarray
"""
nvecs, alphas = indices[:, :-1], indices[:, -1]
return self.translate(nvecs, np.array(self.atom_positions)[alphas])
def estimate_index(self, pos: Union[float, Sequence[float]]) -> np.ndarray:
""" Returns the nearest matching lattice index (n, alpha) for global position.
Parameters
----------
pos: array_like or float
global site position.
Returns
-------
n: np.ndarray
estimated translation vector n
"""
pos = np.asarray(pos)
n = np.asarray(np.round(self._vectors_inv @ pos, decimals=0), dtype="int")
return n
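# Worked example (added; basis chosen for illustration): for a square lattice
# with vectors a1 = (1, 0) and a2 = (0, 1), self._vectors_inv is the identity,
# so estimate_index((2.9, 1.1)) rounds to the translation vector n = [3, 1].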
def get_neighbors(self, nvec: Optional[Union[int, Sequence[int]]] = None,
alpha: Optional[int] = 0,
distidx: Optional[int] = 0) -> np.ndarray:
""" Returns the neighour-indices of a given site by transforming stored neighbor indices.
Raises
------
NoBaseNeighborsError
Raised if the lattice distances and base-neighbors haven't been computed.
Parameters
----------
nvec: (D) array_like or int, optional
translation vector of site, the default is the origin.
alpha: int, optional
site index, default is 0.
distidx: int, default
index of distance to neighbors, default is 0 (nearest neighbors).
Returns
-------
indices: (N, D) np.ndarray
"""
if nvec is None:
nvec = np.zeros(self.dim)
if not self._base_neighbors:
raise NoBaseNeighborsError()
logger.debug("Computing neighbor-indices of %s, %i (distidx: %i)", nvec, alpha, distidx)
nvec = np.atleast_1d(nvec)
keys = list(sorted(self._base_neighbors[alpha].keys()))
dist = keys[distidx]
indices = self._base_neighbors[alpha][dist]
indices_transformed = indices.copy()
indices_transformed[:, :-1] += nvec.astype(np.int64)
logger.debug("Neighbour-indices: %s", indices_transformed)
return indices_transformed
def get_neighbor_positions(self, nvec: Optional[Union[int, Sequence[int]]] = None,
alpha: Optional[int] = 0,
distidx: Optional[int] = 0) -> np.ndarray:
"""Returns the neighour-positions of a given site by transforming the neighbor positions.
Raises
------
NoBaseNeighborsError
Raised if the lattice distances and base-neighbors haven't been computed.
Parameters
----------
nvec: (D) array_like or int, optional
translation vector of site, the default is the origin.
alpha: int, optional
site index, default is 0.
distidx: int, default
index of distance to neighbors, default is 0 (nearest neighbors).
Returns
-------
positions: (N, D) np.ndarray
"""
if nvec is None:
nvec = np.zeros(self.dim)
if not self._base_neighbors:
raise NoBaseNeighborsError()
logger.debug("Computing neighbor-positions of %s, %i (distidx: %i)", nvec, alpha, distidx)
indices = self.get_neighbors(nvec, alpha, distidx)
nvecs, alphas = indices[:, :-1], indices[:, -1]
atom_pos = self._positions[alphas]
positions = self.translate(nvecs, atom_pos)
logger.debug("Neighbour-positions: %s", positions)
return positions
def get_neighbor_vectors(self, alpha: Optional[int] = 0,
distidx: Optional[int] = 0,
include_zero: Optional[bool] = False) -> np.ndarray:
"""Returns the neighours of a given site by transforming stored neighbor indices.
Raises
------
NoBaseNeighborsError
Raised if the lattice distances and base-neighbors haven't been computed.
Parameters
----------
alpha : int, optional
Index of the base atom. The default is the first atom in the unit cell.
distidx : int, default
Index of distance to neighbors, default is 0 (nearest neighbors).
include_zero : bool, optional
Flag if zero-vector is included in result. The default is False.
Returns
-------
vectors : np.ndarray
"""
if not self._base_neighbors:
raise NoBaseNeighborsError()
logger.debug("Computing neighbor-vectors of atom %i (distidx: %i)", alpha, distidx)
pos0 = self._positions[alpha]
pos1 = self.get_neighbor_positions(alpha=alpha, distidx=distidx)
if include_zero:
pos1 = np.append(np.zeros((1, self.dim)), pos1, axis=0)
vecs = pos1 - pos0
logger.debug("Neighbour-vectors: %s", vecs)
return vecs
def fourier_weights(self, k: ArrayLike, alpha: Optional[int] = 0,
distidx: Optional[int] = 0) -> np.ndarray:
"""Returns the Fourier-weight for a given vector.
Parameters
----------
k: array_like
The wavevector to compute the lattice Fourier-weights.
alpha : int, optional
Index of the base atom. The default is the first atom in the unit cell.
distidx : int, default
Index of distance to neighbors, default is 0 (nearest neighbors).
Returns
-------
weight: np.ndarray
"""
vecs = self.get_neighbor_vectors(alpha=alpha, distidx=distidx)
# weights = np.sum([np.exp(1j * np.dot(k, v)) for v in vecs])
weights = np.sum(np.exp(1j * np.inner(k, vecs)))
return weights
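# Added note: both the commented-out loop and the vectorized line above
# evaluate the lattice Fourier weight
#     w(k) = sum_delta exp(i * k . delta),
# where delta runs over the neighbor vectors of atom `alpha` at the given
# distance index.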
def get_base_atom_dict(self, atleast2d: Optional[bool] = True) \
-> Dict[Any, List[Union[np.ndarray, Any]]]:
""" Returns a dictionary containing the positions for eatch type of the base atoms.
Parameters
----------
atleast2d: bool, optional
If 'True', one-dimensional coordinates will be cast to 2D vectors.
Returns
-------
atom_pos: dict
"""
atom_pos = dict()
for atom, pos in zip(self._atoms, self._positions):
if atleast2d and self.dim == 1:
pos = np.array([pos, 0])
if atom.name in atom_pos.keys():
atom_pos[atom].append(pos)
else:
atom_pos[atom] = [pos]
return atom_pos
def build_translation_vectors(self, shape: Union[int, Sequence[int]],
relative: Optional[bool] = False,
pos: Optional[Union[float, Sequence[float]]] = None,
check: Optional[bool] = True,
dtype: Union[int, np.dtype] = None,
oversample: Optional[float] = 0.0,
) -> np.ndarray:
"""Constructs the translation vectors .math:`n` in the lattice basis in a given shape.
Raises
------
ValueError
Raised if the dimension of the position doesn't match the dimension of the lattice.
Parameters
----------
shape: (N) array_like or int
shape of finite size lattice to build.
relative: bool, optional
If 'True' the shape will be multiplied by the cell size of the model.
The default is ``False``.
pos: (N) array_like or int, optional
Optional position of the section to build. If 'None' the origin is used.
check: | |
val == Math.floor(val) ? 0 : Math.round((val-Math.floor(val))*Math.pow(10,r[1].length));
return val < 0 ? "-" + write_num(type, fmt, -val) : commaify(String(Math.floor(val))).replace(/^\\d,\\d{3}$/,"0$&").replace(/^\\d*$/,function($$) { return "00," + ($$.length < 3 ? pad(0,3-$$.length) : "") + $$; }) + "." + pad(rr,r[1].length,0);
}
switch(fmt) {
case "#,###": var x = commaify(String(Math.round(aval))); return x !== "0" ? sign + x : "";
default:
}
throw new Error("unsupported format |" + fmt + "|");
};
function split_fmt(fmt) {
var out = [];
var in_str = -1;
for(var i = 0, j = 0; i < fmt.length; ++i) {
if(in_str != -1) { if(fmt[i] == '"') in_str = -1; continue; }
if(fmt[i] == "_" || fmt[i] == "*" || fmt[i] == "\\\\") { ++i; continue; }
if(fmt[i] == '"') { in_str = i; continue; }
if(fmt[i] != ";") continue;
out.push(fmt.slice(j,i));
j = i+1;
}
out.push(fmt.slice(j));
if(in_str !=-1) throw new Error("Format |" + fmt + "| unterminated string at " + in_str);
return out;
}
SSF._split = split_fmt;
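/* Illustrative usage (added; not part of the original code): split_fmt breaks
   a format into its ";"-separated sections while ignoring semicolons inside
   quoted literals, e.g. split_fmt('0.00;-0.00;"a;b";@') gives
   ['0.00', '-0.00', '"a;b"', '@']. */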
function eval_fmt(fmt, v, opts, flen) {
var out = [], o = "", i = 0, c = "", lst='t', q, dt, j;
fixopts(opts = (opts || {}));
var hr='H';
/* Tokenize */
while(i < fmt.length) {
switch((c = fmt[i])) {
case 'G': /* General */
if(fmt.substr(i, 7).toLowerCase() !== "general")
throw new Error('unrecognized character ' + fmt[i] + ' in ' +fmt);
out.push({t:'G',v:'General'}); i+=7; break;
case '"': /* Literal text */
for(o="";fmt[++i] !== '"' && i < fmt.length;) o += fmt[i];
out.push({t:'t', v:o}); ++i; break;
case '\\': var w = fmt[++i], t = "()".indexOf(w) === -1 ? 't' : w;
out.push({t:t, v:w}); ++i; break;
case '_': out.push({t:'t', v:" "}); i+=2; break;
case '@': /* Text Placeholder */
out.push({t:'T', v:v}); ++i; break;
case 'B': case 'b':
if(fmt[i+1] === "1" || fmt[i+1] === "2") {
if(!dt) dt = parse_date_code(v, opts, fmt[i+1] === "2");
q={t:'X', v:fmt.substr(i,2)}; out.push(q); lst = c; i+=2; break;
}
/* falls through */
case 'M': case 'D': case 'Y': case 'H': case 'S': case 'E':
c = c.toLowerCase();
/* falls through */
case 'm': case 'd': case 'y': case 'h': case 's': case 'e': case 'g':
if(v < 0) return "";
if(!dt) dt = parse_date_code(v, opts);
if(!dt) return "";
o = fmt[i]; while((fmt[++i]||"").toLowerCase() === c) o+=c;
if(c === 'm' && lst.toLowerCase() === 'h') c = 'M'; /* m = minute */
if(c === 'h') c = hr;
o = o.toLowerCase();
q={t:c, v:o}; out.push(q); lst = c; break;
case 'A':
if(!dt) dt = parse_date_code(v, opts);
if(!dt) return "";
q={t:c,v:"A"};
if(fmt.substr(i, 3) === "A/P") {q.v = dt.H >= 12 ? "P" : "A"; q.t = 'T'; hr='h';i+=3;}
else if(fmt.substr(i,5) === "AM/PM") { q.v = dt.H >= 12 ? "PM" : "AM"; q.t = 'T'; i+=5; hr='h'; }
else { q.t = "t"; i++; }
out.push(q); lst = c; break;
case '[':
o = c;
while(fmt[i++] !== ']' && i < fmt.length) o += fmt[i];
if(o.substr(-1) !== ']') throw 'unterminated "[" block: |' + o + '|';
if(o.match(/\[[HhMmSs]*\]/)) {
if(!dt) dt = parse_date_code(v, opts);
if(!dt) return "";
out.push({t:'Z', v:o.toLowerCase()});
} else { o=""; }
break;
/* Numbers */
case '.':
if(dt) {
o = c; while((c=fmt[++i]) === "0") o += c;
out.push({t:'s', v:o}); break;
}
/* falls through */
case '0': case '#':
o = c; while("0#?.,E+-%".indexOf(c=fmt[++i]) > -1 || c=='\\\\' && fmt[i+1] == "-" && "0#".indexOf(fmt[i+2])>-1) o += c;
out.push({t:'n', v:o}); break;
case '?':
o = fmt[i]; while(fmt[++i] === c) o+=c;
q={t:c, v:o}; out.push(q); lst = c; break;
case '*': ++i; if(fmt[i] == ' ' || fmt[i] == '*') ++i; break; // **
case '(': case ')': out.push({t:(flen===1?'t':c),v:c}); ++i; break;
case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9':
o = fmt[i]; while("0123456789".indexOf(fmt[++i]) > -1) o+=fmt[i];
out.push({t:'D', v:o}); break;
case ' ': out.push({t:c,v:c}); ++i; break;
default:
if(",$-+/():!^&'~{}<>=€acfijklopqrtuvwxz".indexOf(c) === -1)
throw 'unrecognized character ' + fmt[i] + ' in ' + fmt;
out.push({t:'t', v:c}); ++i; break;
}
}
var bt = 0, ss0 = 0, ssm;
for(i=out.length-1, lst='t'; i >= 0; --i) {
switch(out[i].t) {
case 'h': case 'H': out[i].t = hr; lst='h'; if(bt < 1) bt = 1; break;
case 's':
if((ssm=out[i].v.match(/\.0+$/))) ss0=Math.max(ss0,ssm[0].length-1);
if(bt < 3) bt = 3;
/* falls through */
case 'd': case 'y': case 'M': case 'e': lst=out[i].t; break;
case 'm': if(lst === 's') { out[i].t = 'M'; if(bt < 2) bt = 2; } break;
case 'X': if(out[i].v === "B2");
break;
case 'Z':
if(bt < 1 && out[i].v.match(/[Hh]/)) bt = 1;
if(bt < 2 && out[i].v.match(/[Mm]/)) bt = 2;
if(bt < 3 && out[i].v.match(/[Ss]/)) bt = 3;
}
}
switch(bt) {
case 0: break;
case 1:
if(dt.u >= 0.5) { dt.u = 0; ++dt.S; }
if(dt.S >= 60) { dt.S = 0; ++dt.M; }
if(dt.M >= 60) { dt.M = 0; ++dt.H; }
break;
case 2:
if(dt.u >= 0.5) { dt.u = 0; ++dt.S; }
if(dt.S >= 60) { dt.S = 0; ++dt.M; }
break;
}
/* replace fields */
var nstr = "", jj;
for(i=0; i < out.length; ++i) {
switch(out[i].t) {
case 't': case 'T': case ' ': case 'D': break;
case 'X': delete out[i]; break;
case 'd': case 'm': case 'y': case 'h': case 'H': case 'M': case 's': case 'e': case 'b': case 'Z':
out[i].v = write_date(out[i].t, out[i].v, dt, ss0);
out[i].t = 't'; break;
case 'n': case '(': case '?':
jj = i+1;
while(out[jj] && ("?D".indexOf(out[jj].t) > -1 || (" t".indexOf(out[jj].t) > -1 && "?t".indexOf((out[jj+1]||{}).t)>-1 && (out[jj+1].t == '?' || out[jj+1].v == '/')) || out[i].t == '(' && (")n ".indexOf(out[jj].t) > -1) || out[jj].t == 't' && (out[jj].v == '/' || '$€'.indexOf(out[jj].v) > -1 || (out[jj].v == ' ' && (out[jj+1]||{}).t == '?')))) {
out[i].v += out[jj].v;
delete out[jj]; ++jj;
}
nstr += out[i].v;
i = jj-1; break;
case 'G': out[i].t = 't'; out[i].v = general_fmt(v,opts); break;
}
}
if(nstr) {
var ostr = write_num(nstr[0]=='(' ? '(' : 'n', nstr, (v<0&&nstr[0] == "-" ? -v : v));
jj=ostr.length-1;
var decpt = out.length;
for(i=0; i < out.length; ++i) if(out[i] && out[i].v.indexOf(".") > -1) { decpt = i; break; }
var lasti=out.length, vv;
if(decpt === out.length && !ostr.match(/E/)) {
for(i=out.length-1; i>= 0;--i) {
if(!out[i] || 'n?('.indexOf(out[i].t) === -1) continue;
vv = out[i].v.split("");
for(j=vv.length-1; j>=0; --j) {
if(jj>=0) vv[j] = ostr[jj--];
else vv[j] = "";
}
out[i].v = vv.join("");
out[i].t = 't';
lasti = i;
}
if(jj>=0 && lasti<out.length) out[lasti].v = ostr.substr(0,jj+1) + out[lasti].v;
}
else if(decpt !== out.length && !ostr.match(/E/)) {
jj = ostr.indexOf(".")-1;
for(i=decpt; i>= 0; --i) {
if(!out[i] || 'n?('.indexOf(out[i].t) === -1) continue;
vv = out[i].v.split("");
for(j=out[i].v.indexOf(".")>-1&&i==decpt?out[i].v.indexOf(".")-1:vv.length-1; j>=0; --j) {
if(jj>=0 && "0#".indexOf(vv[j])>-1) vv[j] = ostr[jj--];
else vv[j] = "";
}
out[i].v = vv.join("");
out[i].t = 't';
lasti = i;
}
if(jj>=0 && lasti<out.length) out[lasti].v = ostr.substr(0,jj+1) + out[lasti].v;
jj = ostr.indexOf(".")+1;
for(i=decpt; i<out.length; ++i) {
if(!out[i] || 'n?('.indexOf(out[i].t) === -1 && i != decpt ) continue;
vv = out[i].v.split("");
for(j=out[i].v.indexOf(".")>-1&&i==decpt?out[i].v.indexOf(".")+1:0; j<vv.length; ++j) {
if(jj<ostr.length) vv[j] = ostr[jj++];
else vv[j] = "";
}
out[i].v = vv.join("");
out[i].t = 't';
lasti = i;
}
}
}
for(i=0; i<out.length; ++i) if(out[i] && 'n(?'.indexOf(out[i].t)>-1) {
out[i].v = write_num(out[i].t, out[i].v, (flen >1 && v < 0 && i>0 && out[i-1].v == "-" ? -v:v));
out[i].t = 't';
}
return out.map(function(x){return x.v;}).join("");
}
SSF._eval = eval_fmt;
function choose_fmt(fmt, v, o) {
if(typeof fmt === 'number') fmt = ((o&&o.table) ? o.table : table_fmt)[fmt];
if(typeof fmt === "string") fmt = split_fmt(fmt);
var l = fmt.length;
if(l<4 && fmt[l-1].indexOf("@")>-1) --l;
switch(fmt.length) {
case 1: fmt = fmt[0].indexOf("@")>-1 ? ["General", "General", "General", fmt[0]] : [fmt[0], fmt[0], fmt[0], "@"]; break;
case 2: fmt = fmt[1].indexOf("@")>-1 ? [fmt[0], fmt[0], fmt[0], fmt[1]] : [fmt[0], fmt[1], fmt[0], "@"]; break;
case 3: fmt = fmt[2].indexOf("@")>-1 ? [fmt[0], fmt[1], fmt[0], fmt[2]] : [fmt[0], fmt[1], fmt[2], "@"]; break;
case 4: break;
default: throw "cannot find right format for |" + fmt + "|";
}
if(typeof v !== "number") return [fmt.length, fmt[3]];
var ff = v > 0 ? fmt[0] : v < 0 ? fmt[1] : fmt[2];
if(fmt[0].match(/\[[=<>]/) || fmt[1].match(/\[[=<>]/)) {
var chk = function(v, rr, out) {
if(!rr) return null;
var found = false;
var thresh = Number(rr[2]);
switch(rr[1]) {
case "=": if(v == thresh) found = true; break;
case ">": if(v > thresh) found = true; break;
case "<": if(v < thresh) found = true; break;
case "<>": if(v != thresh) found = true; break;
case ">=": if(v >= thresh) found = true; break;
case "<=": if(v <= thresh) found = true; break;
}
return found ? out : null;
};
var m1 = fmt[0].match(/\[([=<>]*)([-]?\d+)\]/);
var m2 = fmt[1].match(/\[([=<>]*)([-]?\d+)\]/);
return chk(v, m1, [l, fmt[0]]) || chk(v, m2, [l, fmt[1]]) || [l, fmt[m1&&m2?2:1]];
}
return [l, ff];
}
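/* Added note: choose_fmt expands a format into the four sections
   [positive, negative, zero, text] and picks one by value; e.g. for
   '0.00;(0.00)' the value -3 selects '(0.00)' and a string value selects the
   implicit '@' text section. Bracketed comparisons such as '[>100]0;0.0'
   are checked first via the chk() helper. */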
var format = function format(fmt,v,o) {
fixopts(o = (o||{}));
if(typeof fmt === "string" && fmt.toLowerCase() === "general") return general_fmt(v, o);
if(typeof fmt === 'number') fmt = (o.table || table_fmt)[fmt];
var f = choose_fmt(fmt, v, o);
if(f[1].toLowerCase() === "general") return general_fmt(v,o);
if(v === true) v = "TRUE"; if(v === false) v = "FALSE";
if(v === "" || typeof v === "undefined") return "";
return eval_fmt(f[1], v, o, f[0]);
};
SSF._choose = choose_fmt;
SSF._table = table_fmt;
SSF.load = function(fmt, idx) { table_fmt[idx] = fmt; };
SSF.format = format;
SSF.get_table = function() { return table_fmt; };
SSF.load_table = function(tbl) { for(var i=0; i!=0x0188; ++i) if(tbl[i]) SSF.load(tbl[i], i); };
};
make_ssf(SSF);
/* [MS-OLEPS] v20130118 */
/* [MS-OSHARED] v20130211 */
/* [MS-OLEPS] 2.2 PropertyType */
{
var VT_EMPTY = 0x0000;
var VT_NULL = 0x0001;
var | |
"""
Tests the pysat utils area.
"""
import os
import tempfile
import warnings
from nose.tools import assert_raises, raises
import numpy as np
import pandas as pds
import pysat
import sys
if sys.version_info[0] >= 3:
from importlib import reload as re_load
else:
re_load = reload
# ----------------------------------
# test netCDF export file support
def prep_dir(inst=None):
if inst is None:
inst = pysat.Instrument(platform='pysat', name='testing')
# create data directories
try:
os.makedirs(inst.files.data_path)
except OSError:
pass
def remove_files(inst):
# remove any files
temp_dir = inst.files.data_path
for the_file in os.listdir(temp_dir):
if (the_file == 'pysat_test_ncdf.nc'):
file_path = os.path.join(temp_dir, the_file)
if os.path.isfile(file_path):
os.unlink(file_path)
def test_deprecation_warning_computational_form():
"""Test if computational form in utils is deprecated"""
data = pds.Series([0, 1, 2])
warnings.simplefilter("always")
dslice1 = pysat.ssnl.computational_form(data)
with warnings.catch_warnings(record=True) as war:
dslice2 = pysat.utils.computational_form(data)
assert (dslice1 == dslice2).all()
assert len(war) >= 1
assert war[0].category == DeprecationWarning
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
# store current pysat directory
self.data_path = pysat.data_dir
def teardown(self):
"""Runs after every method to clean up previous testing."""
pysat.utils.set_data_dir(self.data_path)
#######################
# test pysat data dir options
def test_set_data_dir(self):
"""update data_dir"""
pysat.utils.set_data_dir('.')
check1 = (pysat.data_dir == '.')
# Check if next load of pysat remembers the change
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
re_load(pysat)
check2 = (pysat.data_dir == '.')
assert check1 & check2
def test_set_data_dir_no_store(self):
"""update data_dir without storing"""
pysat.utils.set_data_dir('.', store=False)
check1 = (pysat.data_dir == '.')
# Check if next load of pysat remembers old settings
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
re_load(pysat)
check2 = (pysat.data_dir == self.data_path)
assert check1 & check2
@raises(ValueError)
def test_set_data_dir_wrong_path(self):
"""update data_dir with an invalid path"""
pysat.utils.set_data_dir('not_a_directory', store=False)
def test_initial_pysat_load(self):
import shutil
saved = False
try:
root = os.path.join(os.getenv('HOME'), '.pysat')
new_root = os.path.join(os.getenv('HOME'), '.saved_pysat')
shutil.move(root, new_root)
saved = True
except:
pass
re_load(pysat)
try:
if saved:
# remove directory, trying to be careful
os.remove(os.path.join(root, 'data_path.txt'))
os.rmdir(root)
shutil.move(new_root, root)
except:
pass
assert True
class TestScaleUnits():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.deg_units = ["deg", "degree", "degrees", "rad", "radian",
"radians", "h", "hr", "hrs", "hours"]
self.dist_units = ["m", "km", "cm"]
self.vel_units = ["m/s", "cm/s", "km/s", 'm s$^{-1}$', 'cm s$^{-1}$',
'km s$^{-1}$', 'm s-1', 'cm s-1', 'km s-1']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.deg_units, self.dist_units, self.vel_units
def test_scale_units_same(self):
""" Test scale_units when both units are the same """
scale = pysat.utils.scale_units("happy", "happy")
assert scale == 1.0
def test_scale_units_angles(self):
"""Test scale_units for angles """
for out_unit in self.deg_units:
scale = pysat.utils.scale_units(out_unit, "deg")
if out_unit.find("deg") == 0:
assert scale == 1.0
elif out_unit.find("rad") == 0:
assert scale == np.pi / 180.0
else:
assert scale == 1.0 / 15.0
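# Added note: the expected factors follow from the unit definitions, e.g.
# degrees -> radians is pi/180 and degrees -> hours is 1/15, since one hour
# of angle corresponds to 15 degrees (360 deg / 24 h).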
def test_scale_units_dist(self):
"""Test scale_units for distances """
for out_unit in self.dist_units:
scale = pysat.utils.scale_units(out_unit, "m")
if out_unit == "m":
assert scale == 1.0
elif out_unit.find("km") == 0:
assert scale == 0.001
else:
assert scale == 100.0
def test_scale_units_vel(self):
"""Test scale_units for velocities """
for out_unit in self.vel_units:
scale = pysat.utils.scale_units(out_unit, "m/s")
if out_unit.find("m") == 0:
assert scale == 1.0
elif out_unit.find("km") == 0:
assert scale == 0.001
else:
assert scale == 100.0
def test_scale_units_bad_output(self):
"""Test scale_units for unknown output unit"""
assert_raises(ValueError, pysat.utils.scale_units, "happy", "m")
try:
pysat.utils.scale_units('happy', 'm')
except ValueError as verr:
assert str(verr).find('output unit') > 0
def test_scale_units_bad_input(self):
"""Test scale_units for unknown input unit"""
assert_raises(ValueError, pysat.utils.scale_units, "m", "happy")
try:
pysat.utils.scale_units('m', 'happy')
except ValueError as verr:
assert str(verr).find('input unit') > 0
def test_scale_units_bad_match_pairs(self):
"""Test scale_units for mismatched input for all pairings"""
assert_raises(ValueError, pysat.utils.scale_units, "m", "m/s")
assert_raises(ValueError, pysat.utils.scale_units, "m", "deg")
assert_raises(ValueError, pysat.utils.scale_units, "h", "km/s")
def test_scale_units_bad_match_message(self):
"""Test scale_units error message for mismatched input"""
assert_raises(ValueError, pysat.utils.scale_units, "m", "m/s")
try:
pysat.utils.scale_units('m', 'm/s')
except ValueError as verr:
assert str(verr).find('Cannot scale') >= 0
assert str(verr).find('unknown units') < 0
def test_scale_units_both_bad(self):
"""Test scale_units for bad input and output"""
assert_raises(ValueError, pysat.utils.scale_units, "happy", "sad")
try:
pysat.utils.scale_units('happy', 'sad')
except ValueError as verr:
assert str(verr).find('unknown units') > 0
class TestBasicNetCDF4():
def setup(self):
"""Runs before every method to create a clean testing setup."""
# store current pysat directory
self.data_path = pysat.data_dir
# create temporary directory
dir_name = tempfile.mkdtemp()
pysat.utils.set_data_dir(dir_name, store=False)
self.testInst = pysat.Instrument(platform='pysat',
name='testing',
sat_id='100',
clean_level='clean')
self.testInst.pandas_format = True
# create testing directory
prep_dir(self.testInst)
def teardown(self):
"""Runs after every method to clean up previous testing."""
remove_files(self.testInst)
pysat.utils.set_data_dir(self.data_path, store=False)
del self.testInst
@raises(ValueError)
def test_load_netcdf4_empty_filenames(self):
pysat.utils.load_netcdf4(fnames=None)
def test_basic_write_and_read_netcdf4_unimited_time(self):
"""Test reading and writing netcdf4, unlimited time dimension"""
self.test_basic_write_and_read_netcdf4_default_format(unlimited=True)
return
def test_basic_write_and_read_netcdf4_default_format(self, unlimited=False):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst.to_netcdf4(outfile, unlimited_time=unlimited)
loaded_inst, meta = \
pysat.utils.load_netcdf4(outfile,
pandas_format=self.testInst.pandas_format)
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns),
axis=1)
keys = self.testInst.data.columns
for key in keys:
assert(np.all(self.testInst[key] == loaded_inst[key]))
def test_basic_write_and_read_netcdf4_mixed_case_format(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
# modify data names in data
original = sorted(self.testInst.data.columns)
self.testInst.data = self.testInst.data.rename(str.upper, axis='columns')
self.testInst.to_netcdf4(outfile, preserve_meta_case=True)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
# check that names are lower case when written
assert(np.all(original == loaded_inst.columns))
for key in self.testInst.data.columns:
assert(np.all(self.testInst[key] == loaded_inst[key.lower()]))
# modify metadata names in data
self.testInst.meta.data = self.testInst.meta.data.rename(str.upper, axis='index')
# write file
self.testInst.to_netcdf4(outfile, preserve_meta_case=True)
# load file
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
# check that names are upper case when written
assert(np.all(sorted(self.testInst.data.columns) == sorted(loaded_inst.columns)))
@raises(Exception)
def test_write_netcdf4_duplicate_variable_names(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst['MLT'] = 1
self.testInst.to_netcdf4(outfile, preserve_meta_case=True)
def test_write_and_read_netcdf4_default_format_w_compression(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst.to_netcdf4(outfile, zlib=True)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
for key in self.testInst.data.columns:
assert (np.all(self.testInst[key] == loaded_inst[key]))
def test_write_and_read_netcdf4_default_format_w_weird_epoch_name(self):
# create a bunch of files by year and doy
prep_dir(self.testInst)
outfile = os.path.join(self.testInst.files.data_path,
'pysat_test_ncdf.nc')
self.testInst.load(2009, 1)
self.testInst.to_netcdf4(outfile, epoch_name='Santa')
loaded_inst, meta = pysat.utils.load_netcdf4(outfile,
epoch_name='Santa')
self.testInst.data = \
self.testInst.data.reindex(sorted(self.testInst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
for key in self.testInst.data.columns:
assert (np.all(self.testInst[key] == loaded_inst[key]))
def test_write_and_read_netcdf4_default_format_higher_order(self):
# create a bunch of files by year and doy
test_inst = pysat.Instrument('pysat', 'testing2d')
prep_dir(test_inst)
outfile = os.path.join(test_inst.files.data_path, 'pysat_test_ncdf.nc')
test_inst.load(2009, 1)
test_inst.to_netcdf4(outfile)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
test_inst.data = test_inst.data.reindex(sorted(test_inst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
prep_dir(test_inst)
# test Series of DataFrames
test_list = []
for frame1, frame2 in zip(test_inst.data['profiles'],
loaded_inst['profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('profiles', inplace=True, axis=1)
test_inst.data.drop('profiles', inplace=True, axis=1)
# second series of frames
for frame1, frame2 in zip(test_inst.data['alt_profiles'],
loaded_inst['alt_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('alt_profiles', inplace=True, axis=1)
test_inst.data.drop('alt_profiles', inplace=True, axis=1)
# check series of series
for frame1, frame2 in zip(test_inst.data['series_profiles'],
loaded_inst['series_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('series_profiles', inplace=True, axis=1)
test_inst.data.drop('series_profiles', inplace=True, axis=1)
assert(np.all((test_inst.data == loaded_inst).all()))
assert np.all(test_list)
def test_write_and_read_netcdf4_default_format_higher_order_w_zlib(self):
# create a bunch of files by year and doy
test_inst = pysat.Instrument('pysat', 'testing2d')
prep_dir(test_inst)
outfile = os.path.join(test_inst.files.data_path, 'pysat_test_ncdf.nc')
test_inst.load(2009, 1)
test_inst.to_netcdf4(outfile, zlib=True)
loaded_inst, meta = pysat.utils.load_netcdf4(outfile)
test_inst.data = test_inst.data.reindex(sorted(test_inst.data.columns),
axis=1)
loaded_inst = loaded_inst.reindex(sorted(loaded_inst.columns), axis=1)
prep_dir(test_inst)
# test Series of DataFrames
test_list = []
for frame1, frame2 in zip(test_inst.data['profiles'],
loaded_inst['profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('profiles', inplace=True, axis=1)
test_inst.data.drop('profiles', inplace=True, axis=1)
# second series of frames
for frame1, frame2 in zip(test_inst.data['alt_profiles'],
loaded_inst['alt_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('alt_profiles', inplace=True, axis=1)
test_inst.data.drop('alt_profiles', inplace=True, axis=1)
# check series of series
for frame1, frame2 in zip(test_inst.data['series_profiles'],
loaded_inst['series_profiles']):
test_list.append(np.all((frame1 == frame2).all()))
loaded_inst.drop('series_profiles', inplace=True, axis=1)
test_inst.data.drop('series_profiles', inplace=True, axis=1)
assert (np.all((test_inst.data == loaded_inst).all()))
assert np.all(test_list)
def test_netcdf_prevent_attribute_override(self):
"""Test that attributes will not be overridden by default"""
self.testInst.load(2009, 1)
try:
assert self.testInst.bespoke # should raise
except AttributeError:
pass
# instrument meta attributes immutable upon load
assert not self.testInst.meta.mutable
try:
self.testInst.meta.bespoke = True
except AttributeError:
pass
def test_netcdf_attribute_override(self):
"""Test that attributes in netcdf file may be overridden"""
self.testInst.load(2009, 1)
self.testInst.meta.mutable = True
self.testInst.meta.bespoke = True
self.testInst.meta.transfer_attributes_to_instrument(self.testInst)
# ensure custom meta attribute assigned to instrument
assert self.testInst.bespoke
fname = 'output.nc'
outfile = os.path.join(self.testInst.files.data_path, fname)
self.testInst.to_netcdf4(outfile)
data, meta = pysat.utils.load_netcdf4(outfile)
# custom attribute correctly read from file
assert meta.bespoke
class TestBasicNetCDF4xarray():
def setup(self):
"""Runs before every method to create a clean testing setup."""
# store current pysat directory
self.data_path = pysat.data_dir
# create temporary directory
dir_name = tempfile.mkdtemp()
pysat.utils.set_data_dir(dir_name, store=False)
self.testInst = pysat.Instrument(platform='pysat',
name='testing2d_xarray',
sat_id='100',
clean_level='clean')
self.testInst.pandas_format = False
# create | |
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c6 = request.POST.get('r8c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c7 = request.POST.get('r8c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c8 = request.POST.get('r8c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c9 = request.POST.get('r8c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c10 = request.POST.get('r8c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c6 = request.POST.get('r9c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c7 = request.POST.get('r9c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c8 = request.POST.get('r9c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c9 = request.POST.get('r9c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c10 = request.POST.get('r9c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c4 = request.POST.get('r10c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c5 = request.POST.get('r10c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c6 = request.POST.get('r10c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c7 = request.POST.get('r10c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c8 = request.POST.get('r10c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c9 = request.POST.get('r10c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c10 = request.POST.get('r10c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
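# The repeated per-cell sanitization above could be collapsed into a small helper;
# a minimal sketch (hypothetical names, not part of the original view) would be:
#
#   def clean_field(request, name):
#       """Return a POST field with tabs, newlines and carriage returns replaced by spaces."""
#       return request.POST.get(name, '').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
#
#   cells = {f'r{r}c{c}': clean_field(request, f'r{r}c{c}')
#            for r in range(1, 11) for c in range(1, 11)}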
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Vendor service pricing sheet</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Vendor service pricing sheet</div>' + \
'<div class="card-body">'
body += '<h6>Company name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">Service</th>' + \
'<th scope="col">Vendor 1 [Quantity]</th>' + \
'<th scope="col">Vendor 1 [Money per hour]</th>' + \
'<th scope="col">Vendor 1 [Total]</th>' + \
'<th scope="col">Vendor 2 [Quantity]</th>' + \
'<th scope="col">Vendor 2 [Money per hour]</th>' + \
'<th scope="col">Vendor 2 [Total]</th>' + \
'<th scope="col">Vendor 3 [Quantity]</th>' + \
'<th scope="col">Vendor 3 [Money per hour]</th>' + \
'<th scope="col">Vendor 3 [Total]</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>1</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'<td>' + r1c5 + '</td>' + \
'<td>' + r1c6 + '</td>' + \
'<td>' + r1c7 + '</td>' + \
'<td>' + r1c8 + '</td>' + \
'<td>' + r1c9 + '</td>' + \
'<td>' + r1c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>2</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'<td>' + r2c5 + '</td>' + \
'<td>' + r2c6 + '</td>' + \
'<td>' + r2c7 + '</td>' + \
'<td>' + r2c8 + '</td>' + \
'<td>' + r2c9 + '</td>' + \
'<td>' + r2c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>3</td>' + \
'<td>' + r3c1 + '</td>' + \
'<td>' + r3c2 + '</td>' + \
'<td>' + r3c3 + '</td>' + \
'<td>' + r3c4 + '</td>' + \
'<td>' + r3c5 + '</td>' + \
'<td>' + r3c6 + '</td>' + \
'<td>' + r3c7 + '</td>' + \
'<td>' + r3c8 + '</td>' + \
'<td>' + r3c9 + '</td>' + \
'<td>' + r3c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>4</td>' + \
'<td>' + r4c1 + '</td>' + \
'<td>' + r4c2 + '</td>' + \
'<td>' + r4c3 + '</td>' + \
'<td>' + r4c4 + '</td>' + \
'<td>' + r4c5 + '</td>' + \
'<td>' + r4c6 + '</td>' + \
'<td>' + r4c7 + '</td>' + \
'<td>' + r4c8 + '</td>' + \
'<td>' + r4c9 + '</td>' + \
'<td>' + r4c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>5</td>' + \
'<td>' + r5c1 + '</td>' + \
'<td>' + r5c2 + '</td>' + \
'<td>' + r5c3 + '</td>' + \
'<td>' + r5c4 + '</td>' + \
'<td>' + r5c5 + '</td>' + \
'<td>' + r5c6 + '</td>' + \
'<td>' + r5c7 + '</td>' + \
'<td>' + r5c8 + '</td>' + \
'<td>' + r5c9 + '</td>' + \
'<td>' + r5c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>6</td>' + \
'<td>' + r6c1 + '</td>' + \
'<td>' + r6c2 + '</td>' + \
'<td>' + r6c3 + '</td>' + \
'<td>' + r6c4 + '</td>' + \
'<td>' + r6c5 + '</td>' + \
'<td>' + r6c6 + '</td>' + \
'<td>' + r6c7 + '</td>' + \
'<td>' + r6c8 + '</td>' + \
'<td>' + r6c9 + '</td>' + \
'<td>' + r6c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>7</td>' + \
'<td>' + r7c1 + '</td>' + \
'<td>' + r7c2 + '</td>' + \
'<td>' + r7c3 + '</td>' + \
'<td>' + r7c4 + '</td>' + \
'<td>' + r7c5 + '</td>' + \
'<td>' + r7c6 + '</td>' + \
'<td>' + r7c7 + '</td>' + \
'<td>' + r7c8 + '</td>' + \
'<td>' + r7c9 + '</td>' + \
'<td>' + r7c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>8</td>' + \
'<td>' + r8c1 + '</td>' + \
'<td>' + r8c2 + '</td>' + \
'<td>' + r8c3 + '</td>' + \
'<td>' + r8c4 + '</td>' + \
'<td>' + r8c5 + '</td>' + \
'<td>' + r8c6 + '</td>' + \
'<td>' + r8c7 + '</td>' + \
'<td>' + r8c8 + '</td>' + \
'<td>' + r8c9 + '</td>' + \
'<td>' + r8c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>9</td>' + \
'<td>' + r9c1 + '</td>' + \
'<td>' + r9c2 + '</td>' + \
'<td>' + r9c3 + '</td>' + \
'<td>' + r9c4 + '</td>' + \
'<td>' + r9c5 + '</td>' + \
'<td>' + r9c6 + '</td>' + \
'<td>' + r9c7 + '</td>' + \
'<td>' + r9c8 + '</td>' + \
'<td>' + r9c9 + '</td>' + \
'<td>' + r9c10 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>10</td>' + \
'<td>' + r10c1 + '</td>' + \
'<td>' + r10c2 + '</td>' + \
'<td>' + r10c3 + '</td>' + \
'<td>' + r10c4 + '</td>' + \
'<td>' + r10c5 + '</td>' + \
'<td>' + r10c6 + '</td>' + \
'<td>' + r10c7 + '</td>' + \
'<td>' + r10c8 + '</td>' + \
'<td>' + r10c9 + '</td>' + \
'<td>' + r10c10 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Vendor service pricing sheet',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] of [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="vendor_service_pricing_sheet.pdf"'
return response
def start_up_budget_calculator(request):
return render(request, 'reporting/start_up_budget_calculator.html')
def generate_html_to_pdf_start_up_budget_calculator(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', | |
not self.canvas._is_idle_drawing # standard
and not getattr(self.canvas, '_draw_pending', None) # pyqt5
):
self._subplots_kw.update(width=width, height=height)
super().set_size_inches(width, height, forward=forward)
def set_alignx(self, value):
"""
Set the *x* axis label alignment mode.
"""
self.stale = True
self._alignx = bool(value)
def set_aligny(self, value):
"""
Set the *y* axis label alignment mode.
"""
self.stale = True
self._aligny = bool(value)
def set_sharex(self, value):
"""
Set the *x* axis sharing level.
"""
value = int(value)
if value not in range(4):
raise ValueError(
f'Invalid sharing level sharex={value!r}. '
'Axis sharing level can be 0 (share nothing), '
'1 (hide axis labels), '
'2 (share limits and hide axis labels), or '
'3 (share limits and hide axis and tick labels).'
)
self.stale = True
self._sharex = value
def set_sharey(self, value):
"""
Set the *y* axis sharing level.
"""
value = int(value)
if value not in range(4):
raise ValueError(
f'Invalid sharing level sharey={value!r}. '
'Axis sharing level can be 0 (share nothing), '
'1 (hide axis labels), '
'2 (share limits and hide axis labels), or '
'3 (share limits and hide axis and tick labels).'
)
self.stale = True
self._sharey = value
def set_spanx(self, value):
"""
Set the *x* axis label spanning mode.
"""
self.stale = True
self._spanx = bool(value)
def set_spany(self, value):
"""
Set the *y* axis label spanning mode.
"""
self.stale = True
self._spany = bool(value)
@property
def gridspec(self):
"""
The single `GridSpec` instance used for all subplots
in the figure.
"""
return self._gridspec_main
@property
def ref(self):
"""
The reference axes number. The `axwidth`, `axheight`, and `aspect`
`subplots` and `figure` arguments are applied to this axes, and aspect
ratio is conserved for this axes in tight layout adjustment.
"""
return self._ref
@ref.setter
def ref(self, ref):
if not isinstance(ref, Integral) or ref < 1:
raise ValueError(
f'Invalid axes number {ref!r}. Must be integer >=1.')
self.stale = True
self._ref = ref
def _iter_axes(self):
"""
Iterates over all axes and panels in the figure belonging to the
`~proplot.axes.Axes` class. Excludes inset and twin axes.
"""
axs = []
for ax in (
*self._axes_main,
*self._left_panels, *self._right_panels,
*self._bottom_panels, *self._top_panels
):
if not ax or not ax.get_visible():
continue
axs.append(ax)
for ax in axs:
for side in ('left', 'right', 'bottom', 'top'):
for iax in getattr(ax, '_' + side + '_panels'):
if not iax or not iax.get_visible():
continue
axs.append(iax)
return axs
def _journals(journal):
"""
Return the width and height corresponding to the given journal.
"""
# Get dimensions for figure from common journals.
value = JOURNAL_SPECS.get(journal, None)
if value is None:
raise ValueError(
f'Unknown journal figure size specifier {journal!r}. '
'Current options are: '
+ ', '.join(map(repr, JOURNAL_SPECS.keys()))
)
# Return width, and optionally also the height
width, height = None, None
try:
width, height = value
except (TypeError, ValueError):
width = value
return width, height
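# Sketch of the lookup above (the JOURNAL_SPECS entries shown here are invented
# placeholders; real widths and heights come from the module-level table):
#   JOURNAL_SPECS = {'pnas1': 3.4, 'aaas2': (4.8, 9.0)}
#   _journals('pnas1')  -> (3.4, None)    # scalar spec: width only
#   _journals('aaas2')  -> (4.8, 9.0)     # tuple spec: width and height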
def _axes_dict(naxs, value, kw=False, default=None):
"""
Return a dictionary that looks like ``{1:value1, 2:value2, ...}`` or
``{1:{key1:value1, ...}, 2:{key2:value2, ...}, ...}`` for storing
standardized axes-specific properties or keyword args.
"""
# First build up dictionary
# 1) 'string' or {1:'string1', (2,3):'string2'}
if not kw:
if np.iterable(value) and not isinstance(value, (str, dict)):
value = {num + 1: item for num, item in enumerate(value)}
elif not isinstance(value, dict):
value = {range(1, naxs + 1): value}
# 2) {'prop':value} or {1:{'prop':value1}, (2,3):{'prop':value2}}
else:
nested = [isinstance(value, dict) for value in value.values()]
if not any(nested): # any([]) == False
value = {range(1, naxs + 1): value.copy()}
elif not all(nested):
raise ValueError(
'Pass either a dictionary of key-value pairs or '
'a dictionary of dictionaries of key-value pairs.'
)
# Then *unfurl* keys that contain multiple axes numbers, i.e. are meant
# to indicate properties for multiple axes at once
kwargs = {}
for nums, item in value.items():
nums = np.atleast_1d(nums)
for num in nums.flat:
if not kw:
kwargs[num] = item
else:
kwargs[num] = item.copy()
# Fill with default values
for num in range(1, naxs + 1):
if num not in kwargs:
if kw:
kwargs[num] = {}
else:
kwargs[num] = default
# Verify numbers
if {*range(1, naxs + 1)} != {*kwargs.keys()}:
raise ValueError(
f'Have {naxs} axes, but {value!r} has properties for axes '
+ ', '.join(map(repr, sorted(kwargs))) + '.'
)
return kwargs
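# Illustration of the mapping described in the docstring above (the inputs and
# outputs here are made-up examples traced through the logic, not test fixtures):
#   _axes_dict(3, 'ortho')                           -> {1: 'ortho', 2: 'ortho', 3: 'ortho'}
#   _axes_dict(3, {(1, 2): 'ortho'}, default='cart') -> {1: 'ortho', 2: 'ortho', 3: 'cart'}
#   _axes_dict(2, {'lon_0': 0}, kw=True)             -> {1: {'lon_0': 0}, 2: {'lon_0': 0}}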
def subplots(
array=None, ncols=1, nrows=1,
ref=1, order='C',
aspect=1, figsize=None,
width=None, height=None, journal=None,
axwidth=None, axheight=None,
hspace=None, wspace=None, space=None,
hratios=None, wratios=None,
width_ratios=None, height_ratios=None,
left=None, bottom=None, right=None, top=None,
basemap=False, proj=None, projection=None,
proj_kw=None, projection_kw=None,
**kwargs
):
"""
Create a figure with a single subplot or arbitrary grids of subplots,
analogous to `matplotlib.pyplot.subplots`. The subplots can be drawn with
arbitrary projections.
Parameters
----------
array : 2d array-like of int, optional
Array specifying complex grid of subplots. Think of
this array as a "picture" of your figure. For example, the array
``[[1, 1], [2, 3]]`` creates one long subplot in the top row, two
smaller subplots in the bottom row. Integers must range from 1 to the
number of plots.
``0`` indicates an empty space. For example, ``[[1, 1, 1], [2, 0, 3]]``
creates one long subplot in the top row with two subplots in the bottom
row separated by a space.
ncols, nrows : int, optional
Number of columns, rows. Ignored if `array` was passed.
Use these arguments for simpler subplot grids.
order : {'C', 'F'}, optional
Whether subplots are numbered in row-major (``'C'``) or column-major
(``'F'``) order. Analogous to `numpy.array` ordering. This controls
the order that subplots appear in the `subplot_grid` returned by this
function, and the order of subplot a-b-c labels (see
`~proplot.axes.Axes.format`).
figsize : length-2 tuple, optional
Tuple specifying the figure `(width, height)`.
width, height : float or str, optional
The figure width and height. If you specify just one, the aspect
ratio `aspect` of the reference subplot `ref` will be preserved.
ref : int, optional
The reference subplot number. The `axwidth`, `axheight`, and `aspect`
keyword args are applied to this subplot, and the aspect ratio is
conserved for this subplot in the tight layout adjustment. If you
did not specify `width_ratios` and `height_ratios`, the `axwidth`,
`axheight`, and `aspect` settings will apply to *all* subplots --
not just the `ref` subplot.
axwidth, axheight : float or str, optional
The width, height of the reference subplot. Units are interpreted by
`~proplot.utils.units`. Default is :rc:`subplots.axwidth`. Ignored
if `width`, `height`, or `figsize` was passed.
aspect : float or length-2 list of floats, optional
The reference subplot aspect ratio, in numeric form (width divided by
height) or as a (width, height) tuple. Ignored if `width`, `height`,
or `figsize` was passed.
journal : str, optional
String name corresponding to an academic journal standard that is used
to control the figure width and, if specified, the height. See the
below table.
=========== ==================== ==========================================================================================================================================================
Key Size description Organization
=========== ==================== ==========================================================================================================================================================
``'aaas1'`` 1-column `American Association for the Advancement of Science <https://www.sciencemag.org/authors/instructions-preparing-initial-manuscript>`__ (e.g. *Science*)
``'aaas2'`` 2-column ”
``'agu1'`` 1-column `American Geophysical Union <https://publications.agu.org/author-resource-center/figures-faq/>`__
``'agu2'`` 2-column ”
``'agu3'`` full height 1-column ”
``'agu4'`` full height 2-column ”
``'ams1'`` 1-column `American Meteorological Society <https://www.ametsoc.org/ams/index.cfm/publications/authors/journal-and-bams-authors/figure-information-for-authors/>`__
``'ams2'`` small 2-column ”
``'ams3'`` medium 2-column ”
``'ams4'`` full 2-column ”
``'nat1'`` 1-column `Nature Research <https://www.nature.com/nature/for-authors/formatting-guide>`__
``'nat2'`` 2-column ”
``'pnas1'`` 1-column `Proceedings of the National Academy of Sciences <http://www.pnas.org/page/authors/submission>`__
``'pnas2'`` 2-column ”
``'pnas3'`` landscape page ”
=========== ==================== ==========================================================================================================================================================
width_ratios, height_ratios : float or list thereof, optional
Passed to `GridSpec`, denotes the width
and height ratios for the subplot grid. Length of `width_ratios`
must match the number of columns, and length of `height_ratios` must
match the number of rows.
wratios, hratios
Aliases for `width_ratios`, `height_ratios`.
wspace, hspace, space : float or str or list thereof, optional
Passed to `GridSpec`, denotes the
spacing between grid columns, rows, and both, respectively. If float
or string, expanded into lists of length ``ncols-1`` (for `wspace`)
or length ``nrows-1`` (for `hspace`).
Units are interpreted by `~proplot.utils.units` for each element of
the list. By default, these are determined by the "tight
layout" algorithm.
left, right, top, | |
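# A minimal usage sketch of the `array` argument documented above (the import name
# and keyword values are illustrative assumptions, not taken from this file):
#   import proplot as plot
#   fig, axs = plot.subplots(array=[[1, 1], [2, 3]], axwidth=2)
#   axs[0].plot([0, 1], [1, 0])
#   fig.savefig('layout.png')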
import work_wth_data as wd
import display_data as dd
import os
from os import path
from datetime import date
#------------------------------------------------------------------------------------
def get_yes_or_no(message):
"""Input a message, returns 'Y' or 'N'"""
valid_input = False
while not valid_input:
answer = input(message)
answer = answer.upper() # convert to upper case
if answer == 'Y' or answer == 'N':
valid_input = True
else:
print('Please enter Y for yes or N for no.')
return answer
def list_to_string_with_comma(thing):
"""Input a list, returns every item in the list as a string with commas in between"""
string = ""
for item in thing:
string += str(item) + ','
return string[:-1]
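# Equivalent, more idiomatic one-liner (same result as the loop above, including
# the empty-list case):
#   ','.join(str(item) for item in thing)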
#-----------------------------------------------------------------------------------
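# Layout of settings.txt assumed by the readers below (inferred from the slicing
# offsets used in this file; the label at the start of line 3 is not shown here):
#   line 1: "Year: YYYY"
#   line 2: "Current User: <name>"
#   line 3: "<label>,<user1>,<user2>,..."
#   line 4: "Current balance: <x.xx>"
#   line 5: "All balance:,<bal1>,<bal2>,..."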
def get_year():
"""returns the year as a string"""
file = open('settings.txt', 'r')
year = file.readline()
year = year[-5:-1]
file.close()
return year
def get_user():
"""returns the username as a string"""
file = open('settings.txt', 'r')
file.readline()
user = file.readline()
user = user[14:-1]
file.close()
return user
def get_users():
"""returns a list composed of usernames"""
file = open('settings.txt', 'r')
file.readline()
file.readline()
users = file.readline()
users = users.split(',')
file.close()
users = users[1:]
users[-1] = users[-1][:-1]
return users
def get_curr_balance():
"""returns the current balance as a float"""
file = open('settings.txt', 'r')
for i in range(3):
file.readline()
balance = file.readline()
balance = float(balance[17:-1])
file.close()
return balance
def get_balances():
"""returns the balances as a list of strings"""
file = open('settings.txt', 'r')
for i in range(4):
file.readline()
balances = file.readline()
balances = balances.split(',')
file.close()
balances = balances[1:]
balances[-1] = balances[-1][:-1]
return balances
def check_user_year():
"""returns boolean if current year exists for current user"""
path = get_user() + '/' + get_year() + '.csv'
return os.path.exists(path)
#------------------------------------------------------------------------------------
def change_year(new_year):
"""Input year(xxxx)(str or int), changes current year"""
file = open('settings.txt', 'r')
outfile = open('tmpset.txt', 'w')
file.readline()
outfile.write('Year: ' + str(new_year) + '\n')
for line in file:
outfile.write(line)
file.close()
outfile.close()
wd.rewrite_final('tmpset.txt', 'settings.txt')
print("Done!")
if not check_user_year():
set_new_year()
def man_change_balance(new_amount):
"""updates balance ledger"""
wd.man_balance_ledger_update(new_amount)
change_balance(new_amount)
def change_balance(new_amount):
"""input new balance amount(any dtype), updates current balance, user balance"""
file = open('settings.txt', 'r')
outfile = open('tmpset.txt', 'w')
for i in range(3):
line = file.readline()
outfile.write(line)
file.readline()
outfile.write('Current balance: ' + '{:.2f}'.format(new_amount) + '\n')
file.readline()
balances = get_balances()
balances[get_users().index(get_user())] = '{:.2f}'.format(new_amount)
outfile.write('All balance:,' + list_to_string_with_comma(balances) + '\n')
for line in file:
outfile.write(line)
file.close()
outfile.close()
wd.rewrite_final('tmpset.txt', 'settings.txt')
def change_curr_balance_to_user():
"""Used by change_user, title explains it"""
file = open('settings.txt', 'r')
outfile = open('tmpset.txt', 'w')
for i in range(3):
line = file.readline()
outfile.write(line)
balance = get_balances()
balance = balance[get_users().index(get_user())]
file.readline()
outfile.write('Current balance: ' + '{:.2f}'.format(float(balance)) + '\n')
for line in file:
outfile.write(line)
file.close()
outfile.close()
wd.rewrite_final('tmpset.txt', 'settings.txt')
def change_user(username):
"""Input username, changes user, does nothing if user doesn't exist"""
if username in get_users():
file = open('settings.txt', 'r')
outfile = open('tmpset.txt', 'w')
outfile.write(file.readline())
file.readline()
outfile.write('Current User: ' + username + '\n')
for line in file:
outfile.write(line)
file.close()
outfile.close()
wd.rewrite_final('tmpset.txt', 'settings.txt')
change_curr_balance_to_user()
else:
print("User not found")
#--------------------------------------------------------------------------------------------------
def set_new_year():
"""sets up new year if it doesn't exist for user"""
path = get_user() + '/' + 'all.csv'
allfile = open(path, 'r')
spend_default = allfile.readline()
newyear_path = get_user() + '/' + get_year() + '.csv'
new_year_file = open(newyear_path, 'w')
new_year_file.write(spend_default)
allfile.close()
new_year_file.close()
path = get_user() + '/' + 'rec_all.csv'
allfile = open(path, 'r')
rec_default = allfile.readline()
newyear_path = get_user() + '/' + get_year() + 'rec.csv'
new_year_file = open(newyear_path, 'w')
new_year_file.write(rec_default)
allfile.close()
new_year_file.close()
print("Congratulations on making it to the new year!!!")
def add_new_user(username):
"""Input new username, sets up files and stuff"""
if username in get_users():
print("User already exists!")
return
file = open('settings.txt', 'r')
outfile = open('tmpset.txt', 'w')
outfile.write(file.readline())
file.readline()
outfile.write('Current User: ' + username + '\n')
outfile.write(file.readline()[:-1] + ',' + username + '\n')
file.readline()
outfile.write('Current balance: 0.00\n')
outfile.write(file.readline()[:-1] + ',0.00' + '\n')
for line in file:
outfile.write(line)
file.close()
outfile.close()
wd.rewrite_final('tmpset.txt', 'settings.txt')
os.mkdir(get_user())
os.mkdir((get_user() + '/' + 'tmp'))
os.mkdir((get_user() + '/' + 'tmp' + '/' + 'last'))
new_all_path = get_user() + '/' + 'all.csv'
spend_default = 'Date,Amount,Description,Category,Method of purchase\n'
newyear_path = get_user() + '/' + get_year() + '.csv'
new_year_file = open(newyear_path, 'w')
new_year_file.write(spend_default)
new_all_file = open(new_all_path, 'w')
new_all_file.write(spend_default)
new_all_file.close()
new_year_file.close()
new_balancesheet = open(get_user() + '/' + 'balance.csv', 'w')
new_balancesheet.write('Date,Amount,Change Amount,Description,Category\n')
today = date.today()
today = today.strftime("%m-%d-%Y")
new_balancesheet.write(today + ',0.00,0.00,Init,Init')
new_balancesheet.close()
new_recall = open(get_user() + '/' + 'rec_all.csv', 'w')
new_recall.write('Date,Amount,Description,Category\n')
new_recall.close()
new_rec_year = open(get_user() + '/' + get_year() + 'rec.csv', 'w')
new_rec_year.write('Date,Amount,Description,Category\n')
new_rec_year.close()
new_last_entry = open(get_user() + '/last_entry.csv', 'w')
new_last_entry.write('Date Recorded,Date,Amount,Description,Category,(method of purchase)\n')
new_last_entry.close()
print("Welcome!! User added")
def get_format_balance():
"""Returns formatted curr balance($x,xxx.xx)"""
balance = '{:.2f}'.format(get_curr_balance())
balance = balance.split('.')
bit = balance[0][::-1]
new_thing = ''
for i in range(1, len(bit) + 1):
if (i-1) % 3 == 0 and i != 1:
new_thing += ','
new_thing += bit[i - 1]
balance = '$' + new_thing[::-1] + '.' + balance[1]
return balance
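# Note: the manual grouping above mirrors Python's built-in thousands separator;
# a minimal equivalent sketch would be:
#   '${:,.2f}'.format(get_curr_balance())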
#--------------------------------------------------------------------------------------------------
def add_entry_menu():
"""Add entry meny"""
entry_menu = "Add Entry Menu:\nYear is: " + get_year() + '\n---------------\n'
entry_menu += "1: Spent Money\n"
entry_menu += "2: Received Money\n"
entry_menu += "3: Change Year\n"
entry_menu += "4: Show Last Entry\n"
entry_menu += "5: Back to Main Menu\n"
print(entry_menu)
menu_select = input("Please select an option: ")
print('\n\n\n\n\n\n\n')
if menu_select == '1':
wd.spent_money()
elif menu_select == '2':
wd.received_money()
elif menu_select == '3':
change_year(input('Please enter a year(yyyy): '))
elif menu_select == '4':
print(wd.display_line("Date Recorded,Date,Amount,Description,Category,(method of purchase)"))
print(wd.display_line(wd.get_last_entry()))
elif menu_select == '5':
main_menu()
return
else:
print("Input not understood. Please try again.")
add_entry_menu()
def editor_menu():
"""Edit entry menu"""
edit_menu = "Edit Entry Menu:\n----------------\n"
edit_menu += "1: Reenter last spent entry\n"
edit_menu += "2: Remove older spent entry\n"
edit_menu += "3: Reenter last received entry\n"
edit_menu += "4: Remove older received entry\n\n"
edit_menu += "5: Display last SPENT entry\n"
edit_menu += "6: Display last RECEIVED entry\n"
edit_menu += "7: Back to Main Menu\n"
print(edit_menu)
menu_select = input("Please select an option: ")
print('\n\n\n\n\n\n\n')
if menu_select == '1':
wd.edit_last_spent()
elif menu_select == '2':
wd.edit_older_spent(True,'','')
elif menu_select == '3':
wd.edit_last_rec()
elif menu_select == '4':
wd.edit_older_rec(True,'','')
elif menu_select == '5':
print(wd.display_line('Date,Amount,Description,Category,Method of purchase'))
print(wd.display_line(wd.get_last_spent()))
elif menu_select == '6':
print(wd.display_line('Date,Amount,Description,Category'))
print(wd.display_line(wd.get_last_received()))
elif menu_select == '7':
main_menu()
return
else:
print("Input not understood. Please try again.")
editor_menu()
def entry_search():
search_menu = "Entry search menu:\n------------------\n"
search_menu += "1: Search Spent\n"
search_menu += "2: Search Received\n"
search_menu += "3: Back to Main Menu\n"
print(search_menu)
menu_select = input("Please select an option: ")
print('\n\n\n\n\n\n\n')
if menu_select == '1':
wd.search_spent_all(True,'','')
elif menu_select == '2':
wd.search_received_all(True,'','')
elif menu_select == '3':
main_menu()
return
else:
print("Input not understood. Please try again.")
entry_search()
def stat_display_menu():
display_menu = "Stat Display Menu:\n------------------\n"
display_menu += "1: Total SPENT for Date Range\n"
display_menu += "2: Total RECEIVED for Date Range\n\n"
display_menu += "3: Line graph\n"
display_menu += "4: Pie chart\n"
display_menu += "5: Bar graph\n"
display_menu += "6: Back to Main Menu\n"
print(display_menu)
menu_select = input("Please select an option: ")
print('\n\n\n\n\n\n\n')
if menu_select == '1':
dd.total_for_date_range('s', True,'','')
elif menu_select == '2':
dd.total_for_date_range('r', True,'','')
elif menu_select == '3':
dd.line_graph()
elif menu_select == '4':
dd.pie_chart()
elif menu_select == '5':
dd.bar_graph()
elif menu_select == '6':
main_menu()
return
else:
print("Input not understood. Please try again.")
stat_display_menu()
def settings_menu():
set_menu = "Settings and User Menu:\n-----------------------\n"
set_menu += "1: Change user\n"
set_menu += "2: Manually set balance\n"
set_menu += "3: Change year\n"
set_menu += "4: View users and balances\n"
set_menu += "5: Add new User\n"
set_menu += "6: Back to Main Menu\n"
print(set_menu)
menu_select = input("Please select an option: ")
print('\n\n\n\n\n\n\n')
if menu_select == '1':
change_user(input("Please enter a username: "))
elif menu_select == '2':
man_change_balance(float(input('Please enter a new balance(xxxx.xx): ')))
elif menu_select == '3':
change_year(input('Please enter a year(yyyy): '))
elif menu_select == '4':
print('Users and Balances')
[print(values) for values in zip(get_users(), get_balances())]
print('')
elif menu_select == '5':
new_user = input('Please enter a new username: ')
if get_yes_or_no('Is this correct? (y or n) "' + new_user + '"') == 'Y':
add_new_user(new_user)
else:
print('You declined the username!')
elif menu_select == '6':
main_menu()
return
else:
print("Input not understood. Please try again.")
settings_menu()
def main_menu():
string = "Current User is: "
string += get_user() + "\n"
string += "Year is: " + get_year() + '\n'
string += "Current balance is: | |
<filename>Chapter-07/collections/ansible_collections/community/aws/plugins/modules/aws_kms_info.py
#!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: aws_kms_info
version_added: 1.0.0
short_description: Gather information about AWS KMS keys
description:
- Gather information about AWS KMS keys including tags and grants
author: "<NAME> (@willthames)"
options:
alias:
description:
- Alias for key.
- Mutually exclusive with I(key_id) and I(filters).
required: false
aliases:
- key_alias
type: str
version_added: 1.4.0
key_id:
description:
- Key ID or ARN of the key.
- Mutually exclusive with I(alias) and I(filters).
required: false
aliases:
- key_arn
type: str
version_added: 1.4.0
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
The filters aren't natively supported by boto3, but are supported to provide similar
functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
C(tag:tagName)) are available, as are C(key-id) and C(alias)
- Mutually exclusive with I(alias) and I(key_id).
type: dict
pending_deletion:
description: Whether to get full details (tags, grants etc.) of keys pending deletion
default: False
type: bool
keys_attr:
description:
- Returning the C(keys) attribute conflicted with the builtin keys()
method on dictionaries and as such was deprecated.
- This parameter now does nothing, and after version C(4.0.0) this
parameter will be removed.
type: bool
version_added: 2.0.0
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all KMS keys
- community.aws.aws_kms_info:
# Gather information about all keys with a Name tag
- community.aws.aws_kms_info:
filters:
tag-key: Name
# Gather information about all keys with a specific name
- community.aws.aws_kms_info:
filters:
"tag:Name": Example
'''
RETURN = '''
kms_keys:
description: list of keys
type: complex
returned: always
contains:
key_id:
description: ID of key
type: str
returned: always
sample: <KEY>
key_arn:
description: ARN of key
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/<KEY>
key_state:
description: The state of the key
type: str
returned: always
sample: PendingDeletion
key_usage:
description: The cryptographic operations for which you can use the key.
type: str
returned: always
sample: ENCRYPT_DECRYPT
origin:
description:
The source of the key's key material. When this value is C(AWS_KMS),
AWS KMS created the key material. When this value is C(EXTERNAL), the
key material was imported or the CMK lacks key material.
type: str
returned: always
sample: AWS_KMS
aws_account_id:
description: The AWS Account ID that the key belongs to
type: str
returned: always
sample: 1234567890123
creation_date:
description: Date of creation of the key
type: str
returned: always
sample: "2017-04-18T15:12:08.551000+10:00"
description:
description: Description of the key
type: str
returned: always
sample: "My Key for Protecting important stuff"
enabled:
description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
type: str
returned: always
sample: false
enable_key_rotation:
description: Whether automatic yearly key rotation is enabled. Returns None if the key rotation status can't be determined.
type: bool
returned: always
sample: false
aliases:
description: list of aliases associated with the key
type: list
returned: always
sample:
- aws/acm
- aws/ebs
tags:
description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
type: dict
returned: always
sample:
Name: myKey
Purpose: protecting_stuff
policies:
description: list of policy documents for the keys. Empty when access is denied even if there are policies.
type: list
returned: always
sample:
Version: "2012-10-17"
Id: "auto-ebs-2"
Statement:
- Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
Effect: "Allow"
Principal:
AWS: "*"
Action:
- "kms:Encrypt"
- "kms:Decrypt"
- "kms:ReEncrypt*"
- "kms:GenerateDataKey*"
- "kms:CreateGrant"
- "kms:DescribeKey"
Resource: "*"
Condition:
StringEquals:
kms:CallerAccount: "111111111111"
kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- Sid: "Allow direct access to key metadata to the account"
Effect: "Allow"
Principal:
AWS: "arn:aws:iam::111111111111:root"
Action:
- "kms:Describe*"
- "kms:Get*"
- "kms:List*"
- "kms:RevokeGrant"
Resource: "*"
grants:
description: list of grants associated with a key
type: complex
returned: always
contains:
constraints:
description: Constraints on the encryption context that the grant allows.
See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
type: dict
returned: always
sample:
encryption_context_equals:
"aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
creation_date:
description: Date of creation of the grant
type: str
returned: always
sample: "2017-04-18T15:12:08+10:00"
grant_id:
description: The unique ID for the grant
type: str
returned: always
sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
grantee_principal:
description: The principal that receives the grant's permissions
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
issuing_account:
description: The AWS account under which the grant was issued
type: str
returned: always
sample: arn:aws:iam::01234567890:root
key_id:
description: The key ARN to which the grant applies.
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
name:
description: The friendly name that identifies the grant
type: str
returned: always
sample: xyz
operations:
description: The list of operations permitted by the grant
type: list
returned: always
sample:
- Decrypt
- RetireGrant
retiring_principal:
description: The principal that can retire the grant
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
# Caching lookup for aliases
_aliases = dict()
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
paginator = connection.get_paginator('list_keys')
return paginator.paginate().build_full_result()
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_aliases_with_backoff(connection):
paginator = connection.get_paginator('list_aliases')
return paginator.paginate().build_full_result()
def get_kms_aliases_lookup(connection):
if not _aliases:
for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
# Not all aliases are actually associated with a key
if 'TargetKeyId' in alias:
# strip off leading 'alias/' and add it to key's aliases
if alias['TargetKeyId'] in _aliases:
_aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
else:
_aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
return _aliases
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_tags_with_backoff(connection, key_id, **kwargs):
return connection.list_resource_tags(KeyId=key_id, **kwargs)
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_grants_with_backoff(connection, key_id, **kwargs):
params = dict(KeyId=key_id)
if kwargs.get('tokens'):
params['GrantTokens'] = kwargs['tokens']
paginator = connection.get_paginator('list_grants')
return paginator.paginate(**params).build_full_result()
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_metadata_with_backoff(connection, key_id):
return connection.describe_key(KeyId=key_id)
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def list_key_policies_with_backoff(connection, key_id):
paginator = connection.get_paginator('list_key_policies')
return paginator.paginate(KeyId=key_id).build_full_result()
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_key_policy_with_backoff(connection, key_id, policy_name):
return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_enable_key_rotation_with_backoff(connection, key_id):
try:
current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e:
return None
return current_rotation_status.get('KeyRotationEnabled')
def canonicalize_alias_name(alias):
if alias is None:
return None
if alias.startswith('alias/'):
return alias
return 'alias/' + alias
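# Behaviour of the helper above:
#   canonicalize_alias_name('my-key')        -> 'alias/my-key'
#   canonicalize_alias_name('alias/my-key')  -> 'alias/my-key'
#   canonicalize_alias_name(None)            -> None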
def get_kms_tags(connection, module, key_id):
# Handle pagination here as list_resource_tags does not have
# a paginator
kwargs = {}
tags = []
more = True
while more:
try:
tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
tags.extend(tag_response['Tags'])
except is_boto3_error_code('AccessDeniedException'):
tag_response = {}
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to obtain key tags")
if tag_response.get('NextMarker'):
kwargs['Marker'] = tag_response['NextMarker']
else:
more = False
return tags
def get_kms_policies(connection, module, key_id):
try:
policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
policy in policies]
except is_boto3_error_code('AccessDeniedException'):
return []
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to obtain key policies")
def key_matches_filter(key, filtr):
if filtr[0] == 'key-id':
return filtr[1] == key['key_id']
if filtr[0] == 'tag-key':
return filtr[1] in key['tags']
if filtr[0] == 'tag-value':
return filtr[1] in key['tags'].values()
if filtr[0] == 'alias':
return filtr[1] in key['aliases']
if filtr[0].startswith('tag:'):
tag_key = filtr[0][4:]
if tag_key not in key['tags']:
return False
return key['tags'].get(tag_key) == filtr[1]
def key_matches_filters(key, filters):
if not filters:
return True
else:
return all(key_matches_filter(key, filtr) for filtr in filters.items())
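# Illustration with a made-up key record (fields follow the get_key_details output):
#   key = {'key_id': '1234abcd', 'tags': {'Name': 'Example'}, 'aliases': ['example']}
#   key_matches_filters(key, None)                     -> True   (no filters given)
#   key_matches_filters(key, {'tag:Name': 'Example'})  -> True
#   key_matches_filters(key, {'tag-key': 'Owner'})     -> False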
def get_key_details(connection, module, key_id, tokens=None):
if not tokens:
tokens = []
try:
result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
# Make sure we have the canonical ARN, we might have been passed an alias
key_id = result['Arn']
except is_boto3_error_code('NotFoundException'):
return None
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to obtain key metadata")
result['KeyArn'] = result.pop('Arn')
try:
aliases = get_kms_aliases_lookup(connection)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to obtain aliases")
# We can only get aliases for our own account, so we don't need the full ARN
result['aliases'] = aliases.get(result['KeyId'], [])
result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id)
if module.params.get('pending_deletion'):
return camel_dict_to_snake_dict(result)
try:
result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to obtain key grants")
tags = get_kms_tags(connection, module, key_id)
result = camel_dict_to_snake_dict(result)
result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
result['policies'] = get_kms_policies(connection, module, key_id)
return result
def get_kms_info(connection, module):
if module.params.get('key_id'):
key_id = module.params.get('key_id')
details = get_key_details(connection, module, key_id)
if details:
return [details]
return []
elif module.params.get('alias'):
alias = canonicalize_alias_name(module.params.get('alias'))
details = get_key_details(connection, module, alias)
if details:
return [details]
return []
else:
try:
keys = get_kms_keys_with_backoff(connection)['Keys']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to obtain keys")
return [get_key_details(connection, module, | |
import os
import numpy as np
import matplotlib.pyplot as plt
import imageio
from matplotlib.colors import Normalize
import ipywidgets as ipw
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import splev
from .. import splineutils
out = ipw.Output()
def show_geometry_props(data, res, size=(16, 9), titles=["Length", "Area", "Circularity"]):
"""
Display length, area and circularity information for time-lapse.
Parameters
----------
data: data object
created from dataset.Data
res: res object
created from results.Results
size: tuple
image size
titles: list
titles for each plot
Returns
-------
fig: matplotlib figure
ax: matplotlib axis
"""
length = np.zeros((data.K,))
area = np.zeros((data.K,))
for k in range(data.K):
length[k] = splineutils.spline_contour_length(res.spline[k])
area[k] = splineutils.spline_area(res.spline[k])
fig, ax = plt.subplots(1, 3, figsize=size)
ax[0].plot(length)
ax[0].set_title(titles[0])
ax[1].plot(area)
ax[1].set_title(titles[1])
ax[2].plot(length ** 2 / area / 4 / np.pi)
ax[2].set_title(titles[2])
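# The third panel plots length**2 / (4 * pi * area), i.e. the perimeter-based
# circularity P**2 / (4*pi*A); it equals 1 for a perfect circle and grows as the
# contour becomes more irregular.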
fig.tight_layout()
return fig, ax
def show_geometry(data, res, size=(16, 9), prop='length', title=None):
"""
Display one of length, area, or circularity for the time-lapse.
Parameters
----------
data: data object
created from dataset.Data
res: res object
created from results.Results
size: tuple
image size
prop: str
property to display
title: str
title for plot
Returns
-------
fig: matplotlib figure
ax: matplotlib axis
"""
length = np.zeros((data.K,))
area = np.zeros((data.K,))
for k in range(data.K):
length[k] = splineutils.spline_contour_length(res.spline[k])
area[k] = splineutils.spline_area(res.spline[k])
title_dict = {'length': 'Length', 'area': 'Area', 'circularity': 'Circularity'}
fig, ax = plt.subplots(figsize=size)
if prop == 'length':
ax.plot(length)
elif prop == 'area':
ax.plot(area)
elif prop == 'circularity':
ax.plot(length ** 2 / area / 4 / np.pi)
if title is None:
ax.set_title(title_dict[prop])
else:
ax.set_title(title)
fig.tight_layout()
return fig, ax
def show_edge_line_aux(N, s, color, lw, fig_ax=None):
"""
Plot spline s in the given color by interpolating N points.
Parameters
----------
N: int
number of interpolation points
s: spline object
as returned by splprep
color: matplotlib color
lw: curve thickness
fig_ax: tuple
matplotlib figure and axes
Returns
-------
fig: matplotlib figure
ax: matplotlib axis
"""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
c = splev(np.linspace(0, 1, N + 1), s)
ax.plot(c[0], c[1], color=color, zorder=50, lw=lw)
fig.tight_layout()
return fig, ax
def show_edge_line(
N, s, lw=0.1, fig_ax=None, cmap_name='jet', show_colorbar=True, colorbar_label="Frame index"):
"""
Draw the cell-edge contour of all time points
using a colored line.
Parameters
----------
N: int
number of interpolation points
s: spline object
as returned by splprep
lw: curve thickness
fig_ax: tuple
matplotlib figure and axes
cmap_name: str
color map name
show_colorbar: bool
show colorbar
colorbar_label: str
colorbar label
Returns
-------
fig: matplotlib figure
ax: matplotlib axis
"""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
# Evaluate splines at window locations and on fine-resolution grid
K = len(s)
cmap = plt.cm.get_cmap(cmap_name)
for k in range(K):
fig, ax = show_edge_line_aux(N, s[k], cmap(k / (K - 1)), lw, fig_ax=(fig, ax))
if show_colorbar:
fig.colorbar(
plt.cm.ScalarMappable(norm=Normalize(vmin=0, vmax=K - 1), cmap=cmap),
label=colorbar_label,
)
fig.tight_layout()
return fig, ax
def show_edge_overview(
param, data, res, lw=0.1, size=(12, 9), fig_ax=None,
title="Edge overview", cmap_image='gray', cmap_contour='jet',
show_colorbar=True, colorbar_label="Frame index"):
"""
Display image of first time point and all contour splines
overlayed on top.
Parameters
----------
param: param object
created from parameters.Param
data: data object
created from dataset.Data
res: res object
created from results.Results
lw: float
spline curves thickness
size: tuple
image size
fig_ax: tuple
matplotlib figure and axes
title: str
title for plot
cmap_image: matplotlib color map
image color map
cmap_contour: matplotlib color map
contour color map
show_colorbar: bool
show colorbar
colorbar_label: str
colorbar label
Returns
-------
fig: matplotlib figure
ax: matplotlib axis
"""
if fig_ax is None:
fig, ax = plt.subplots(figsize=size)
else:
fig, ax = fig_ax
ax.set_title(title)
ax.imshow(data.load_frame_morpho(0), cmap=cmap_image)
fig, ax = show_edge_line(
param.n_curve, res.spline, lw, (fig, ax),
cmap_name=cmap_contour, show_colorbar=show_colorbar, colorbar_label=colorbar_label)
fig.tight_layout()
return fig, ax
def show_edge_vectorial_aux(param, data, res, k, curvature=False, fig_ax=None):
"""
Plot time point k with the contour and the displacement vectors
overlayed. The contour is color-coded to represent either
displacement or curvature.
Parameters
----------
param: param object
created from parameters.Param
data: data object
created from dataset.Data
res: res object
created from results.Results
k: int
time point
curvature: bool
represent curvature instead of displacement
Returns
-------
fig: matplotlib figure
ax: matplotlib axis
"""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
ax.clear()
plt.figure(fig.number)
# plt.clf()
ax.set_title("Frame " + str(k) + " to frame " + str(k + 1))
ax.imshow(data.load_frame_morpho(k), cmap="gray")
#N = param.n_curve + 1
if curvature:
N = 3 * len(res.spline[k][0])
f = splineutils.spline_curvature(res.spline[k], np.linspace(0, 1, N))
else:
f = res.displacement[:, k]
fig, ax = show_edge_scatter(
param.n_curve,
res.spline[k - 1], # res.spline[k],
res.spline[k], # res.spline[k + 1],
res.param0[k],
res.param[k],
f,
fig_ax=(fig, ax),
) # Show edge structures (spline curves, displacement vectors/curvature)
fig.tight_layout()
return fig, ax
def save_edge_vectorial_movie(param, data, res, curvature=False, size=(12, 9)):
if curvature:
name = "Edge_animation_curvature"
else:
name = "Edge_animation_displacement"
with out:
fig, ax = plt.subplots(figsize=size)
writer = imageio.get_writer(os.path.join(param.analysis_folder, name + ".gif"))
for k in range(data.K - 1):
fig, ax = show_edge_vectorial_aux(
param, data, res, k, curvature, fig_ax=(fig, ax)
)
fig.savefig(os.path.join(param.analysis_folder, "temp.png"))
writer.append_data(
imageio.imread(os.path.join(param.analysis_folder, "temp.png"))
)
writer.close()
plt.close(fig)
def show_edge_scatter(N, s1, s2, t1, t2, d, dmax=None, fig_ax=None):
"""Draw the cell-edge contour and the displacement vectors.
The contour is drawn using a scatter plot to color-code the displacements."""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
plt.figure(fig.number)
# Evaluate splines at window locations and on fine-resolution grid
c1 = splineutils.splevper(t1, s1)
c2 = splineutils.splevper(t2, s2)
c1p = splev(np.linspace(0, 1, N + 1), s1)
c2p = splev(np.linspace(0, 1, N + 1), s2)
# Interpolate displacements
# d = 0.5 + 0.5 * d / np.max(np.abs(d))
if len(d) < N + 1:
d = np.interp(np.linspace(0, 1, N + 1), t1, d, period=1)
if dmax is None:
dmax = np.max(np.abs(d))
if dmax == 0:
dmax = 1
# Plot results
# matplotlib.use('PDF')
lw = 1
s = 1 # Scaling factor for the vectors
ax.plot(c1p[0], c1p[1], "b", zorder=50, lw=lw)
ax.plot(c2p[0], c2p[1], "r", zorder=100, lw=lw)
# plt.scatter(c1p[0], c1p[1], c=d, cmap='bwr', vmin=-dmax, vmax=dmax, zorder=50, s1=lw)
# # plt.colorbar(label='Displacement [pixels]')
for j in range(len(t2)):
ax.arrow(
c1[0][j],
c1[1][j],
s * (c2[0][j] - c1[0][j]),
s * (c2[1][j] - c1[1][j]),
color="y",
zorder=200,
lw=lw,
)
# plt.arrow(c1[0][j], c1[1][j], s1 * u[0][j], s1 * u[1][j], color='y', zorder=200, lw=lw) # Show normal to curve
ax.arrow(
c1[0][0],
c1[1][0],
s * (c2[0][0] - c1[0][0]),
s * (c2[1][0] - c1[1][0]),
color="c",
zorder=400,
lw=lw,
)
fig.tight_layout()
return fig, ax
def show_edge_raster_coloured_by_feature(
data, res, k, feature, N=None, width=1, fig_ax=None, normalize=False, cmap_name='seismic'):
"""Display the rasterized contour colored by a given feature on top of image.
Parameters
----------
data : data object
res : result object
k : int
time point
feature : str
feature for coloring 'displacement', 'displacement_cumul', 'curvature'
N : int
number of points for contour generation, default None
width : int, optional
width of contour for display, by default 1
fig_ax : tuple, optional
matplotlib figure-axis tuple, by default None
normalize : bool, optional
normalize intensity over time-lapse, by default False
cmap_name : str, optional
matplotlib colormap, by default 'seismic'
Returns
-------
fig, ax: Matplotlib figure and axis
"""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
plt.figure(fig.number)
im_disp, mask = splineutils.edge_colored_by_features(
data, res, t=k, feature=feature, N=N, enlarge_width=width)
min_val = None
max_val = None
if normalize:
if feature == 'displacement':
min_val = res.displacement.min()
max_val = res.displacement.max()
elif feature == 'displacement_cumul':
min_val = np.cumsum(res.displacement, axis=1).min()
max_val = np.cumsum(res.displacement, axis=1).max()
im_disp_coloured = colorize_raster(
im_disp, cmap_name=cmap_name,
min_val=min_val, max_val=max_val,
mask=mask)
ax.imshow(data.load_frame_morpho(k), cmap='gray')
ax.imshow(im_disp_coloured)
ax.set_title("Frame " + str(k))
fig.tight_layout()
return fig, ax
def colorize_raster(im, cmap_name, min_val=None, max_val=None, mask=None, alpha=0.5):
"""Colorize an image with a given colormap.
Parameters
----------
im : ndarray
image to colorize
cmap_name : str
Matplotlib colormap
min_val : float, optional
min value to display, by default min of image
max_val : float, optional
max value to display, by default max of image
mask : ndarray, optional
mask to make empty regions transparent, by default None
alpha : float, optional
transparency of image, by default 0.5
Returns
-------
c: ndarray
colorized image (nxmx4)
"""
if mask is None:
| |
<gh_stars>1-10
# encoding: utf-8
"""
pypcsim implementation of the PyNN API.
<NAME> <EMAIL>
<NAME> <EMAIL>
<NAME> <EMAIL>
December 2006-
:copyright: Copyright 2006-2013 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
$Id: __init__.py 1258 2013-01-31 15:01:25Z apdavison $
"""
__version__ = "$Revision: 1258 $"
import sys
import pyNN.random
from pyNN.random import *
from pyNN import common, recording, errors, space, core, __doc__
from pyNN.pcsim import simulator
common.simulator = simulator
recording.simulator = simulator
import os.path
import types
import sys
import numpy
import pypcsim
from pyNN.pcsim.standardmodels.cells import *
from pyNN.pcsim.connectors import *
from pyNN.pcsim.standardmodels.synapses import *
from pyNN.pcsim.electrodes import *
from pyNN.pcsim.recording import *
from pyNN import standardmodels
try:
import tables
except ImportError:
pass
import exceptions
from datetime import datetime
import operator
Set = set
ID = simulator.ID
# ==============================================================================
# Utility classes
# ==============================================================================
# Implementation of the NativeRNG
class NativeRNG(pyNN.random.NativeRNG):
def __init__(self, seed=None, type='MersenneTwister19937'):
pyNN.random.AbstractRNG.__init__(self, seed)
self.rndEngine = getattr(pypcsim, type)()
if not self.seed:
self.seed = int(datetime.today().microsecond)
self.rndEngine.seed(self.seed)
def next(self, n=1, distribution='Uniform', parameters={'a':0,'b':1}, mask_local=None):
"""Return n random numbers from the distribution.
If n is 1, return a float, if n > 1, return a numpy array,
if n <= 0, raise an Exception."""
distribution_type = getattr(pypcsim, distribution + "Distribution")
if isinstance(parameters, dict):
dist = apply(distribution_type, (), parameters)
else:
dist = apply(distribution_type, tuple(parameters), {})
values = [ dist.get(self.rndEngine) for i in xrange(n) ]
if n == 1:
return values[0]
else:
return values
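# Example (editor's sketch, not part of the original module): drawing values
# from the PCSIM-backed RNG. The distribution name is resolved by appending
# "Distribution" and looking the class up in pypcsim, so 'Uniform' maps to
# pypcsim.UniformDistribution; the parameter names follow the defaults of next().
#   rng = NativeRNG(seed=42)
#   single = rng.next()                                    # one float in [0, 1)
#   batch = rng.next(n=5, distribution='Uniform',
#                    parameters={'a': 0, 'b': 1})          # list of 5 floats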
def list_standard_models():
"""Return a list of all the StandardCellType classes available for this simulator."""
setup()
standard_cell_types = [obj for obj in globals().values() if isinstance(obj, type) and issubclass(obj, standardmodels.StandardCellType)]
for cell_class in list(standard_cell_types):  # iterate over a copy so removing failing classes is safe
try:
create(cell_class)
except Exception, e:
print "Warning: %s is defined, but produces the following error: %s" % (cell_class.__name__, e)
standard_cell_types.remove(cell_class)
return [obj.__name__ for obj in standard_cell_types]
class WDManager(object):
def getWeight(self, w=None):
if w is not None:
weight = w
else:
weight = 1.
return weight
def getDelay(self, d=None):
if d is not None:
delay = d
else:
delay = simulator.state.min_delay
return delay
def convertWeight(self, w, conductance):
if conductance:
w_factor = 1e-6 # Convert from µS to S
else:
w_factor = 1e-9 # Convert from nA to A
if isinstance(w, pyNN.random.RandomDistribution):
weight = pyNN.random.RandomDistribution(w.name, w.parameters, w.rng)
if weight.name == "uniform":
(w_min, w_max) = weight.parameters
weight.parameters = (w_factor*w_min, w_factor*w_max)
elif weight.name == "normal":
(w_mean, w_std) = weight.parameters
weight.parameters = (w_factor*w_mean, w_factor*w_std)
else:
print "WARNING: no conversion of the weights for this particular distribution"
else:
weight = w*w_factor
return weight
def reverse_convertWeight(self, w, conductance):
if conductance:
w_factor = 1e6 # Convert from S to µS
else:
w_factor = 1e9 # Convert from A to nA
return w*w_factor
def convertDelay(self, d):
if isinstance(d, pyNN.random.RandomDistribution):
delay = pyNN.random.RandomDistribution(d.name, d.parameters, d.rng)
if delay.name == "uniform":
(d_min, d_max) = delay.parameters
delay.parameters = (d_min/1000., d_max/1000.)
elif delay.name == "normal":
(d_mean, d_std) = delay.parameters
delay.parameters = (d_mean/1000., d_std/1000.)
else:
delay = d/1000.
return delay
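# Example (editor's sketch): the unit conversions performed by WDManager.
# Weights are scaled from µS to S (conductance-based) or from nA to A
# (current-based), and delays are converted from ms to s.
#   wd = WDManager()
#   wd.convertWeight(0.5, conductance=True)    # 0.5 µS -> 5e-07 S
#   wd.convertWeight(0.5, conductance=False)   # 0.5 nA -> 5e-10 A
#   wd.convertDelay(2.0)                       # 2.0 ms -> 0.002 s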
# ==============================================================================
# Functions for simulation set-up and control
# ==============================================================================
def setup(timestep=0.1, min_delay=0.1, max_delay=10.0, **extra_params):
"""
Should be called at the very beginning of a script.
extra_params contains any keyword arguments that are required by a given
simulator but not by others.
For pcsim, the possible arguments are 'construct_rng_seed', 'simulation_rng_seed' and 'threads'.
"""
if simulator.state.constructRNGSeed is None:
if extra_params.has_key('construct_rng_seed'):
construct_rng_seed = extra_params['construct_rng_seed']
else:
construct_rng_seed = datetime.today().microsecond
simulator.state.constructRNGSeed = construct_rng_seed
if simulator.state.simulationRNGSeed is None:
if extra_params.has_key('simulation_rng_seed'):
simulation_rng_seed = extra_params['simulation_rng_seed']
else:
simulation_rng_seed = datetime.today().microsecond
simulator.state.simulationRNGSeed = simulation_rng_seed
if extra_params.has_key('threads'):
simulator.net = pypcsim.DistributedMultiThreadNetwork(
extra_params['threads'],
pypcsim.SimParameter( pypcsim.Time.ms(timestep),
pypcsim.Time.ms(min_delay),
pypcsim.Time.ms(max_delay),
simulator.state.constructRNGSeed,
simulator.state.simulationRNGSeed))
else:
simulator.net = pypcsim.DistributedSingleThreadNetwork(
pypcsim.SimParameter( pypcsim.Time.ms(timestep),
pypcsim.Time.ms(min_delay),
pypcsim.Time.ms(max_delay),
simulator.state.constructRNGSeed,
simulator.state.simulationRNGSeed))
simulator.state.t = 0
#simulator.state.dt = timestep # seems to mess up the net object
simulator.state.min_delay = min_delay
simulator.state.max_delay = max_delay
common.setup(timestep, min_delay, max_delay, **extra_params)
return simulator.net.mpi_rank()
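# Example (editor's sketch): a pcsim-specific setup() call using the extra
# keyword arguments handled above. The seed and thread values are illustrative.
#   node_rank = setup(timestep=0.1, min_delay=0.1, max_delay=10.0,
#                     construct_rng_seed=1234, simulation_rng_seed=5678,
#                     threads=2)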
def end(compatible_output=True):
"""Do any necessary cleaning up before exiting."""
for recorder in simulator.recorder_list:
recorder.write(gather=True, compatible_output=compatible_output)
simulator.recorder_list = []
def run(simtime):
"""Run the simulation for simtime ms."""
simulator.state.t += simtime
simulator.net.advance(int(simtime / simulator.state.dt ))
return simulator.state.t
reset = common.reset
initialize = common.initialize
get_current_time = common.get_current_time
get_time_step = common.get_time_step
get_min_delay = common.get_min_delay
get_max_delay = common.get_max_delay
num_processes = common.num_processes
rank = common.rank
# ==============================================================================
# High-level API for creating, connecting and recording from populations of
# neurons.
# ==============================================================================
class Population(common.Population):
"""
An array of neurons all of the same type. `Population' is used as a generic
term intended to include layers, columns, nuclei, etc., of cells.
"""
recorder_class = Recorder
def __init__(self, size, cellclass, cellparams=None, structure=None,
label=None, parent=None):
__doc__ = common.Population.__doc__
common.Population.__init__(self, size, cellclass, cellparams, structure, label)
def _create_cells(self, cellclass, cellparams, n):
"""
Create cells in PCSIM.
`cellclass` -- a PyNN standard cell or a native PCSIM cell class.
`cellparams` -- a dictionary of cell parameters.
`n` -- the number of cells to create
`parent` -- the parent Population, or None if the cells don't belong to
a Population.
This function is used by both `create()` and `Population.__init__()`
Return:
- a 1D array of all cell IDs
- a 1D boolean array indicating which IDs are present on the local MPI
node
- the ID of the first cell created
- the ID of the last cell created
"""
global net
assert n > 0, 'n must be a positive integer'
# if isinstance(cellclass, str):
# if not cellclass in dir(pypcsim):
# raise errors.InvalidModelError('Trying to create non-existent cellclass ' + cellclass )
# cellclass = getattr(pypcsim, cellclass)
# self.celltype = cellclass
# if issubclass(cellclass, standardmodels.StandardCellType):
self.celltype = cellclass(cellparams)
self.cellfactory = self.celltype.simObjFactory
# else:
# self.celltype = cellclass
# if issubclass(cellclass, pypcsim.SimObject):
# self.cellfactory = cellclass(**cellparams)
# else:
# raise exceptions.AttributeError('Trying to create non-existent cellclass ' + cellclass.__name__ )
self.all_cells = numpy.array([id for id in simulator.net.add(self.cellfactory, n)], simulator.ID)
self.first_id = self.all_cells[0]
self.last_id = self.all_cells[-1]
# mask_local is used to extract those elements from arrays that apply to the cells on the current node
self._mask_local = numpy.array([simulator.is_local(id) for id in self.all_cells])
for i,id in enumerate(self.all_cells):
self.all_cells[i] = simulator.ID(self.all_cells[i])
self.all_cells[i].parent = self
# CuboidGridPopulation(SimNetwork &net, GridPoint3D origin, Volume3DSize dims, SimObjectFactory &objFactory)
##self.pcsim_population = pypcsim.CuboidGridObjectPopulation(
## simulator.net,
## pypcsim.GridPoint3D(0,0,0),
## pypcsim.Volume3DSize(dims[0], dims[1], dims[2]),
## self.cellfactory)
##self.cell = numpy.array(self.pcsim_population.idVector())
##self.first_id = 0
##self.cell -= self.cell[0]
##self.all_cells = self.cell
##self.local_cells = numpy.array(self.pcsim_population.localIndexes())
##def __getitem__(self, addr):
## """Return a representation of the cell with coordinates given by addr,
## suitable for being passed to other methods that require a cell id.
## Note that __getitem__ is called when using [] access, e.g.
## p = Population(...)
## p[2,3] is equivalent to p.__getitem__((2,3)).
## """
## if isinstance(addr, int):
## addr = (addr,)
## if len(addr) != self.actual_ndim:
## raise errors.InvalidDimensionsError, "Population has %d dimensions. Address was %s" % (self.actual_ndim, str(addr))
## orig_addr = addr;
## while len(addr) < 3:
## addr += (0,)
## index = 0
## for i, s in zip(addr, self.steps):
## index += i*s
## pcsim_index = self.pcsim_population.getIndex(addr[0], addr[1], addr[2])
## assert index == pcsim_index, " index = %s, pcsim_index = %s" % (index, pcsim_index)
## id = ID(pcsim_index)
## id.parent = self
## if orig_addr != self.locate(id):
## raise IndexError, 'Invalid cell address %s' % str(addr)
## assert orig_addr == self.locate(id), 'index=%s addr=%s id=%s locate(id)=%s' % (index, orig_addr, id, self.locate(id))
## return id
##def __iter__(self):
## return self.__gid_gen()
def __gid_gen(self):
"""
Generator to produce an iterator over all cells on this node,
returning gids.
"""
ids = self.pcsim_population.idVector()
for i in ids:
id = ID(i-ids[0])
id.parent = self
yield id
def id_to_index(self, id):
cells = self.all_cells
## supposed to support id being a list/array of IDs.
## For now, restrict to single ID
##if hasattr(id, '__len__'):
## res = []
## for item in id:
## res.append(numpy.where(cells == item)[0][0])
## return numpy.array(res)
##else:
return cells.tolist().index(id) # because ids may not be consecutive when running a distributed sim
##def getObjectID(self, index):
## return self.pcsim_population[index]
##def __len__(self):
## """Return the total number of cells in the population."""
## return self.pcsim_population.size()
##def tset(self, parametername, value_array):
## """
## 'Topographic' set. Set the value of parametername to the values in
## value_array, which must have the same dimensions as the Population.
## """
## """PCSIM: iteration and set """
## if self.dim[0:self.actual_ndim] == value_array.shape:
import os
import json
import requests
import functools
from copy import copy
from .functions.classes import Mode
from .functions import exceptions
__all__ = ["BaseJai"]
def raise_status_error(code):
"""
Decorator to process responses with unexpected response codes.
Args
----
code: int
Expected Code.
"""
def decorator(function):
@functools.wraps(function)
def new_function(*args, **kwargs):
response = function(*args, **kwargs)
if response.status_code == code:
return response.json()
# find a way to process this
# what errors to raise, etc.
message = f"Something went wrong.\n\nSTATUS: {response.status_code}\n"
try:
res_json = response.json()
print(res_json)
if isinstance(res_json, dict):
detail = res_json.get(
'message', res_json.get('detail', response.text))
else:
detail = response.text
except:
detail = response.text
detail = str(detail)
if "Error: " in detail:
error, msg = detail.split(": ", 1)
try:
raise eval(error)(message + msg)
except NameError:
raise eval("exceptions." + error)(message + msg)
except:
raise ValueError(message + response.text)
else:
raise ValueError(message + detail)
return new_function
return decorator
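# Example (editor's sketch, not part of the original module): how the decorator
# is applied to the request helpers below. The endpoint path is an assumption
# used only for illustration.
#   @raise_status_error(200)
#   def _example_call(self):
#       return requests.get(self.url + "/example", headers=self.header)
# Calling the wrapped method returns response.json() when the HTTP status is
# 200 and raises a mapped exception (or ValueError) otherwise.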
class BaseJai(object):
"""
Base class for requests with the Mycelia API.
"""
def __init__(self,
auth_key: str = None,
url: str = None,
var_env: str = "JAI_SECRET"):
"""
Initialize the Jai class.
An authorization key is needed to use the Mycelia API.
Parameters
----------
auth_key : str
Authorization key for the use of the API.
url : str, optional
Param used for development purposes. `Default is None`.
Returns
-------
None
"""
if auth_key is None:
auth_key = os.environ.get(var_env, "")
if url is None:
self.__url = "https://mycelia.azure-api.net"
self.header = {"Auth": auth_key}
else:
self.__url = url[:-1] if url.endswith("/") else url
self.header = {"company-key": auth_key}
@property
def url(self):
"""
Return the base API URL used by this instance.
"""
return self.__url
@raise_status_error(200)
def _info(self, mode="complete", get_size=True):
"""
Get name and type of each database in your environment.
"""
get_size = json.dumps(get_size)
return requests.get(url=self.url +
f"/info?mode={mode}&get_size={get_size}",
headers=self.header)
@raise_status_error(200)
def _status(self):
"""
Get the status of your JAI environment when training.
"""
return requests.get(self.url + "/status", headers=self.header)
@raise_status_error(200)
def _delete_status(self, name):
return requests.delete(self.url + f"/status?db_name={name}",
headers=self.header)
@raise_status_error(200)
def _download_vectors(self, name: str):
"""
Download vectors from a particular database.
Args
----
name : str
String with the name of a database in your JAI environment.
"""
return requests.get(self.url + f"/key/{name}", headers=self.header)
@raise_status_error(200)
def _filters(self, name):
"""
Gets the valid values of filters.
Args
----
name : str
String with the name of a database in your JAI environment.
"""
return requests.get(self.url + f"/filters/{name}", headers=self.header)
@raise_status_error(200)
def _similar_id(self,
name: str,
id_item: list,
top_k: int = 5,
filters=None):
"""
Creates a list of dicts with the index and distance of the k most similar items, given an id.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
id_item : list
List of ids of the item the user is looking for.
top_k : int
Number of k similar items we want to return. `Default is 5`.
Return
------
response : dict
Dictionary with the index and distance of `the k most similar items`.
"""
if not isinstance(id_item, list):
raise TypeError(
f"id_item param must be int or list, `{id_item.__class__.__name__}` found."
)
filtering = "" if filters is None else "".join(
["&filters=" + s for s in filters])
url = self.url + f"/similar/id/{name}?top_k={top_k}" + filtering
return requests.put(
url,
headers=self.header,
json=id_item,
)
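# Note (editor's comment): a filters list such as ["brand", "color"] expands to
# the query-string suffix "&filters=brand&filters=color", giving a final URL of
# the form /similar/id/{name}?top_k=5&filters=brand&filters=color.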
@raise_status_error(200)
def _similar_json(self,
name: str,
data_json,
top_k: int = 5,
filters=None):
"""
Creates a list of dicts with the index and distance of the k most similar items, given a JSON data entry.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
data_json : dict (JSON)
Data in JSON format. Each input in the dictionary will be used to search for the `top_k` most
similar entries in the database.
top_k : int
Number of k similar items we want to return. `Default is 5`.
Return
------
response : dict
Dictionary with the index and distance of `the k most similar items`.
"""
filtering = "" if filters is None else "".join(
["&filters=" + s for s in filters])
url = self.url + f"/similar/data/{name}?top_k={top_k}" + filtering
header = copy(self.header)
header['Content-Type'] = "application/json"
return requests.put(url, headers=header, data=data_json)
@raise_status_error(200)
def _predict(self, name: str, data_json, predict_proba: bool = False):
"""
Predict the output of new data for a given database by calling its
respective API method. This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
data_json : JSON file (dict)
Data to be inferred by the previously trained model.
predict_proba : bool
Whether or not to return the probabilities of each prediction. `Default is False`.
Return
-------
results : dict
Dictionary of predictions for the data passed as parameter.
"""
url = self.url + \
f"/predict/{name}?predict_proba={predict_proba}"
header = copy(self.header)
header['Content-Type'] = "application/json"
return requests.put(url, headers=header, data=data_json)
@raise_status_error(200)
def _ids(self, name: str, mode: Mode = "simple"):
"""
Get id information of a given database.
Args
----
name : str
String with the name of a database in your JAI environment.
mode : str, optional
Level of detail to return. Possible values are 'simple', 'summarized' or 'complete'.
Return
-------
response: list
List with the actual ids (mode: 'complete') or a summary of ids
('simple'/'summarized') of the given database.
Example
----------
>>> name = 'chosen_name'
>>> j = Jai(AUTH_KEY)
>>> ids = j.ids(name)
>>> print(ids)
['891 items from 0 to 890']
"""
return requests.get(self.url + f"/id/{name}?mode={mode}",
headers=self.header)
@raise_status_error(200)
def _is_valid(self, name: str):
"""
Check if a given name is a valid database name (i.e., if it is in your environment).
Args
----
name : str
String with the name of a database in your JAI environment.
Return
------
response: bool
True if name is in your environment. False, otherwise.
"""
return requests.get(self.url + f"/validation/{name}",
headers=self.header)
@raise_status_error(202)
def _append(self, name: str):
"""
Add data to a database that has been previously trained.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
Return
------
response : dict
Dictionary with the API response.
"""
return requests.patch(self.url + f"/data/{name}", headers=self.header)
@raise_status_error(200)
def _insert_json(self, name: str, data_json, filter_name: str = None):
"""
Insert data in JSON format. This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
data_json : dict
Data in JSON format.
Return
------
response : dict
Dictionary with the API response.
"""
filtering = "" if filter_name is None else f"?filter_name={filter_name}"
url = self.url + f"/data/{name}" + filtering
header = copy(self.header)
header['Content-Type'] = "application/json"
return requests.post(url, headers=header, data=data_json)
@raise_status_error(201)
def _setup(self, name: str, body, overwrite=False):
"""
Call the API method for database setup.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
db_type : str
Database type (Supervised, SelfSupervised, Text...)
overwrite : bool
[Optional] Whether or not to overwrite the given database. `Default is False`.
**kwargs:
Any parameters the user wants to (or needs to) set for the given datase. Please
refer to the API methods to see the possible arguments.
Return
-------
response : dict
Dictionary with the API response.
"""
overwrite = json.dumps(overwrite)
return requests.post(
self.url + f"/setup/{name}?overwrite={overwrite}",
headers=self.header,
json=body,
)
@raise_status_error(200)
def _report(self, name, verbose: int = 2):
"""
Get a report about the training model.
Parameters
----------
name : str
String with the name of a database in your JAI environment.
verbose : int, optional
Level of description. The default is 2.
Use verbose 2 to get the loss graph, verbose 1 to get only the
metrics result.
Returns
-------
dict
Dictionary with the information.
"""
return requests.get(self.url + f"/report/{name}?verbose={verbose}",
headers=self.header)
@raise_status_error(200)
def _temp_ids(self, name: str, mode: Mode = "simple"):
"""
Get id information of a RAW database (i.e., before training). This is a protected method
Args
----
name : str
String with the name of a database in your JAI environment.
mode : str, optional
Level of detail to return. Possible values are 'simple', 'summarized' or 'complete'.
Return
-------
response: list
List with the actual ids (mode: 'complete') or a summary of ids
('simple'/'summarized') of the given database.
# Source repository: Speedy905/Cave-Dweller
#Project <NAME>
#------------------------------------------------------
#MAKE SURE YOU CHANGE THE VERSION EVERYTIME YOU EDIT IT
#Version: 1.0 (FINAL)
#------------------------------------------------------
#Adam, Andres, <NAME>, Peter, Seymour
#------------------------------------------------------
#Imports pygame and other necessities
import pygame, random, sys, copy, time
from pygame.locals import *
#Sets up the pygame variables and requirements
WINDOWWIDTH = 800
WINDOWHEIGHT = 600
TEXTCOLOR = (255,255,255)
BACKGROUNDCOLOR = (0,0,0)
FPS = 60
PLAYERMOVERATE = 6
bear_values = [0,4]
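# Editor's note: bear_values acts as a two-element record used by the puzzle
# functions below -- index 0 holds the bear's food count and index 1 holds its
# remaining energy (see batPuzzle, which pops and re-inserts these positions).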
bat_food = 0
turtle_food = 0
rabbit_food = 0
fox_food = 0
#Player movement variables
moveLeft = False
moveRight = False
moveUp = False
moveDown = False
#Main classes of the game.
class PCanimals(): #all the animals that the user controls
pass #nothing but the bear, so everything initialized in the bear class
class bearClass(PCanimals): #only controllable character
def __init__(self):
super(bearClass, self).__init__()
self.name = 'Bear' #name of animal
self.food = 0 #user starts with 0 food
self.energy = 6 #user starts with 6 energy to use to enter activities
#############################################
class NPCanimals(object): #all the AI that the user will challenge
def __init__(self):
self.name = 'Animal Name' #only placeholder
self.food = 10 # AI has 10 food to give away
self.energy = 2 # AI has 2 energy to give away
class foxClass(NPCanimals): #class for the fox
def __init__(self):
super(foxClass, self).__init__()
self.name = 'Fox' #name of animal
class batClass(NPCanimals):
def __init__(self):
super(batClass, self).__init__()
self.name = 'Bat' #name of animal
class turtleClass(NPCanimals):
def __init__(self):
super(turtleClass, self).__init__()
self.name = 'Turtle' #name of animal
class rabbitClass(NPCanimals):
def __init__(self):
super(rabbitClass, self).__init__()
self.name = 'Rabbit' #name of animal
#############################################
#Sets up the main functions for the game
#Exits the game
def leave():
pygame.quit()
sys.exit()
#Waits for user to press a specific key
def waitForPlayertoPressKey():
while True:
for event in pygame.event.get():
if event.type == QUIT:
leave()
if event.type == KEYDOWN:
#Pressing escape or q exits the game
if event.key == K_ESCAPE or event.key == ord('q') :
leave()
#Pressing h goes to the help menu
if event.key == ord('h'):
showHelp()
return
#Waits for player to press any Key
def anyKey():
while True:
for event in pygame.event.get():
if event.type == QUIT:
leave()
if event.type == KEYDOWN:
return
#Be able to show text
def drawText(text, font, surface, x, y):
textobj = font.render(text, 1, TEXTCOLOR)
textrect = textobj.get_rect()
textrect.topleft = (x, y)
surface.blit(textobj, textrect)
#PUZZLE FUNCTIONS
#--------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------
def batPuzzle(bear_values): # Defines a function named batPuzzle that receives 1 parameter.
# Loads an image and makes it into a rect.
bat_img = pygame.image.load('puzzle images/bat_background.png')
bat_img_rect = bat_img.get_rect()
user_chances = 0 # Assigns the value of 0 to user_chances.
guess_count = 3 # Assigns the value of 3 to guess_count.
while user_chances < 3: # While user_chances is less than 3, execute the code below.
# Loads 2 rock images and makes them into rects.
spriteRock1 = pygame.image.load('puzzle images/rock1.png')
rock1Rect = spriteRock1.get_rect()
spriteRock2 = pygame.image.load('puzzle images/rock2.png')
rock2Rect = spriteRock2.get_rect()
# Blits the background onto the screen,
windowSurface.blit(bat_img, bat_img_rect)
# Draw text to the screen using our drawText function.
drawText('Hello, there are berries under one of these rocks.', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5))
drawText('Guess which one it is under to get your food.', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5) + 30)
drawText('Press 1 for the first rock or 2 for the second rock.', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5)+ 60)
drawText('You have ' + str(guess_count) + ' guess(es)', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5) + 90)
# Moves the position of the rects.
rock1Rect.top += 300
rock1Rect.right += 200
rock2Rect.top += 300
rock2Rect.right += 400
# Blits the two rocks onto the screen.
windowSurface.blit(spriteRock1, rock1Rect)
windowSurface.blit(spriteRock2, rock2Rect)
bat_list = [1,0] # Creates a list with two values.
random.shuffle(bat_list) # Shuffles the list.
pygame.display.update() # Updates the screen.
for event in pygame.event.get():
if event.type == QUIT: # If the event is the user pressing the x at the top right, execute the code below.
leave() # Run the leave() function.
if event.type == KEYDOWN: # If the event is a user pushing a key down, execute the code below.
if event.key == K_1: # If the key pushed down is 1, execute the code below.
rock_1 = bat_list.pop(0) # Pops the list at position 0 and assigns it to rock_1.
if rock_1 == 1: # If rock_1 is 1, execute the code below.
windowSurface.blit(bat_img, bat_img_rect) # Blits the background onto the screen.
# Draws text onto the screen.
drawText('Correct, here is your food. Press any key to return.', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5))
user_chances = 4 # Assigns user_chances to 4 to stop the while loop.
pygame.display.update() # Updates the screen.
anyKey() # Allows the user to press any key to continue.
else: # If the above if does not run, execute the code below.
windowSurface.blit(bat_img, bat_img_rect) # Blits the background onto the screen.
# Draws text to the screen.
drawText('Wrong, I will shuffle the rocks then try again. Press any key to return', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5))
user_chances += 1 # Adds 1 to user_chances.
guess_count -= 1 # Subtracts 1 from guess_count.
pygame.display.update() # Updates the screen.
anyKey() # Allows the user to press any key to continue.
if event.key == K_2: # If the key pushed down is 2, execute the code below.
rock_2 = bat_list.pop(0) # Pops the list at position 0 and assigns it to rock_2.
if rock_2 == 1: # If rock_2 is 1, execute the code below.
windowSurface.blit(bat_img, bat_img_rect)# Blits the background onto the screen.
# Draws text to the screen.
drawText('Correct, here is your food. Press any key to return.', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5))
user_chances = 4 # Assigns user_chances to 4 to stop the while loop.
pygame.display.update() # Updates the screen.
anyKey() # Allows the user to press any key to continue.
else: # If the above if does not run, execute the code below.
windowSurface.blit(bat_img, bat_img_rect) # Blits the background onto the screen.
# Draws text to the screen.
drawText('Wrong, I will shuffle the rocks then try again. Press any key to return.', font, windowSurface, (WINDOWWIDTH/5), (WINDOWHEIGHT/5))
user_chances += 1 # Adds 1 to user_chances.
guess_count -= 1 # Subtracts 1 from guess_count.
pygame.display.update() # Updates the screen.
anyKey() # Allows the user to press any key to continue.
if user_chances >= 4: # If user_chances is greater than or equal to 4, execute the code below.
bear_food = bear_values.pop(0) # Pops from position 0 and assigns the value to bear_food.
bear_food += 2 # Adds 2 to bear_food.
bear_values.insert(0, bear_food) # Inserts the value back into the list at position 0.
return bear_values # Return bear_values.
else: # If the above if does not run, execute the code below.
bear_energy = bear_values.pop(1) # Pops from position 1 and assigns the value to bear_energy.
bear_energy -= 1 # Subtracts 1 from bear_energy.
bear_values.insert(1, bear_energy) # Inserts the value back into the list at position 1.
return bear_values # Return bear_values.
#Rabbit Puzzle
def rabbitPuzzle(bear_values): # Define a function to the name rabbitPuzzle that takes 1 parameter.
# Loads images and makes them all into rects.
race_start = pygame.image.load('puzzle images/rabbit_race.png')
race_startRect = race_start.get_rect()
race_win = pygame.image.load('puzzle images/rabbit_race_bwin.png')
race_winRect = race_win.get_rect()
race_lose = pygame.image.load('puzzle images/rabbit_race_rwin.png')
race_loseRect = race_lose.get_rect()
windowSurface.fill(BACKGROUNDCOLOR) # Fills the window with the background colour.
# Draws text to the screen calling the drawText function.
drawText('Hello we will race for your prize.', font, windowSurface, (WINDOWWIDTH / 5), (WINDOWHEIGHT / 5))
drawText('You have an 80% chance of winning if you race once.', font, windowSurface, (WINDOWWIDTH / 5), (WINDOWHEIGHT / 5) + 20)
drawText('You have a 60% chance of winning if you race twice.', font, windowSurface, (WINDOWWIDTH / 5), (WINDOWHEIGHT / 5) + 50)
drawText('Race 1 gives you two food if you win and Race 2 gives you 3.', font, windowSurface, (WINDOWWIDTH / 5), (WINDOWHEIGHT / 5) + 80)
drawText('Press 1 for race one and press 2 for race two.', font, windowSurface, (WINDOWWIDTH / 5), (WINDOWHEIGHT / 5) + 110)
race_startRect.topleft = (0, 385) # Moves the position of the rect.
windowSurface.blit(race_start, race_startRect) # Blits the rect to the screen.
second slice of the polygon
Description
-----------
1) A horizontal/vertical line is drawn from the point.
2) A new vertex is inserted into a polygon at the first intersection
of an edge of the polygon with the horizontal/vertical line.
3) The polygon is sliced horizontally/vertically from the inserted
vertex
'''
try:
xyi, index = self.insertVertex(point, xy, horizontal)
except ValueError:
if horizontal:
raise ValueError('fracture.slicePoint : The polygon cannot be sliced horizontally from this point')
else:
raise ValueError('fracture.slicePoint : The polygon cannot be sliced vertically from this point')
except:
raise ValueError('fracture.slicePoint : Something unaccounted for threw an error')
s1, s2 = self.slicePolygon(index, xyi, horizontal)
return s1, s2
def insertVertex(self, point, xy, horizontal = True):
'''
insertVertex(point, xy, horizontal = True)
Returns a polygon with a vertex inserted along the axis of the point
Parameters
----------
point : sequence of two floats
The (x, y) point from which the slicing line is drawn
xy : Nx2 numpy.ndarray
An array of points representing a polygon
horizontal : boolean
Slice horizontally (true) or vertically (false)
Slicing vertically takes more time since xy is changed to yx before
slicing horizontally and then yx is changed back to xy
Vertical slicing can be sped up by independent implementation
Returns
-------
xyi : Nx2 numpy.ndarray
The polygon with a vertex inserted
index : integer
The index of the inserted vertex
Description
-----------
1) A horizontal/vertical line is drawn from the point.
2) A new vertex is inserted into a polygon at the first intersection
of an edge of the polygon with the horizontal/vertical line.
'''
if not horizontal:
xy[:,[0,1]] = xy[:,[1,0]]
point = point[::-1]
rightEdge = self.isEdgePoly((point[0]+self.eps,point[1]), xy)
rightInside, tmp, rightCrossIndex = self.isInsidePolyByPoint((point[0]+self.eps,point[1]),xy,True)
#Determine if a point px+eps is inside or on the edge of the polygon
leftEdge = self.isEdgePoly((point[0]-self.eps,point[1]), xy)
leftInside, leftCrossIndex, tmp = self.isInsidePolyByPoint((point[0]-self.eps,point[1]),xy,True)
if not rightEdge and np.any(rightCrossIndex):
#Identify the nearest cross edge
index = None
dMax = np.inf
d = dMax
for i in range(rightCrossIndex.size):
if rightCrossIndex[i]:
if xy[i+1,1] == point[1] or xy[i,1] == point[1]:
pass
else:
dxdy = (xy[i+1,0]-xy[i,0])/float(xy[i+1,1]-xy[i,1])
cy = point[1] - xy[i,1]
d = (point[0] - xy[i,0] - dxdy*cy)**2
if d < dMax:
index = i
dMax = d
elif not leftEdge and np.any(leftCrossIndex):
#Identify the nearest cross edge
index = None
dMax = np.inf
d = dMax
for i in range(leftCrossIndex.size):
if leftCrossIndex[i]:
if xy[i+1,1] == point[1] or xy[i,1] == point[1]:
pass
else:
dxdy = (xy[i+1,0]-xy[i,0])/float(xy[i+1,1]-xy[i,1])
cy = point[1] - xy[i,1]
d = (point[0] - xy[i,0] - dxdy*cy)**2
if d < dMax:
index = i
dMax = d
else:
if not horizontal:
xy[:,[0,1]] = xy[:,[1,0]]
raise ValueError('This polygon cannot be sliced at the specified vertex')
if index is None:
raise ValueError('This polygon cannot be sliced at the specified vertex')
#Split the polygon into two
if xy[index+1,0] == xy[index,0]:
newPoint = np.array([[xy[index,0],point[1]]])
elif xy[index+1,1] == xy[index,1]:
if np.abs(xy[index+1,0] - point[0]) < np.abs(xy[index,0] - point[0]):
newPoint = xy[[index+1]]
else:
newPoint = xy[[index]]
else:
dxdy = (xy[index+1,0]-xy[index,0])/float(xy[index+1,1]-xy[index,1])
cy = point[1] - xy[index,1]
xint = xy[index,0] + dxdy*cy
newPoint = np.array([[xint,point[1]]])
if np.all(xy[index] == newPoint):
xyi = xy.copy()
elif np.all(xy[index+1] == newPoint):
xyi = xy.copy()
index += 1
else:
xyi = np.insert(xy,index+1,newPoint,axis=0)
index += 1
if not horizontal:
xy[:,[0,1]] = xy[:,[1,0]]
xyi[:,[0,1]] = xyi[:,[1,0]]
return xyi, index
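# Example (editor's sketch; the exact result depends on self.eps and on the
# isEdgePoly/isInsidePolyByPoint helpers defined elsewhere in this class):
#   xy = np.array([[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]], dtype=float)
#   xyi, idx = self.insertVertex((4.0, 5.0), xy, horizontal=True)
#   # xyi contains one extra vertex where the horizontal line y = 5 drawn from
#   # the point first crosses an edge of the square, and idx is its index.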
def checkPrimitive(self, vertices):
'''
checkPrimitives(vertices)
Returns true if the polygon may be a Jeol v3.0 format primitive
Parameters
----------
vertices : Nx1 numpy.ndarray of integers
[x0 y0 x1 y1 ... xn yn] or
[x0 y0 x1 y1 ... xn yn x0 y0]
Returns
-------
isPrimitive : boolean
Describes if the polygon is a primitive
failLog : String
A message describing why the polygon is not a primitive
Description
-----------
This function will confirm if the polygon is compatible with the
Jeol v3.0 format primitive, but it does not enforce the following
specification:
No negative values
Integers must range from 0 to 2^20
These specifications were ignored to allow fracturing of polygons that
extend beyond the field of an ebeam writer
The v3_Pat.checkPrimitive() should be used to qualify all polygons
that are ready for conversion.
'''
isPrimitive = False
failLog = 'No error was found'
if np.all(vertices[0:2] == vertices[-2:]):
vertices = vertices[:-2]
if not vertices.size in [6,8]:
failLog = 'The vertices parameter must contain 4, 6, or 8 elements'
if vertices.size == 8:
#The elements of vertices are [X1 Y1 X2 Y2 X3 Y3 X4 Y4]
#Determine if the base of the trapezoid is along X or Y
tmp = np.append(vertices,vertices[0:2])
isX = sum(np.diff(tmp[1::2]) == 0) == 2
isY = sum(np.diff(tmp[0::2]) == 0) == 2
tmp = vertices.reshape(4,2)
#The trapezoid is not supported
if not isX and not isY:
failLog = 'The trapezoid does not have both base parallel to either the X or Y axis'
#The trapezoid is a rectangle
if isX and isY:
isPrimitive = True
#The trapezoid has both base parallel to X axis
elif isX:
#Sort the vertices of the trapezoid
iA = tmp[:,1] == min(tmp[:,1])
iB = tmp[:,1] == max(tmp[:,1])
i1 = (tmp[:,0] == min(tmp[iA,0])) * iA
i2 = (tmp[:,0] == max(tmp[iA,0])) * iA
i3 = (tmp[:,0] == max(tmp[iB,0])) * iB
i4 = (tmp[:,0] == min(tmp[iB,0])) * iB
trap = np.array([tmp[i1],tmp[i2],tmp[i3],tmp[i4]],dtype=np.uint32).ravel()
theta1 = np.arctan(abs(int(trap[0])-int(trap[6]))/float(abs(int(trap[1])-int(trap[7]))))
theta2 = np.arctan(abs(int(trap[2])-int(trap[4]))/float(abs(int(trap[3])-int(trap[5]))))
if theta1 > np.pi/3 or theta2 > np.pi/3:
failLog = 'X Trapezoid Theta1 or Theta2 cannot be larger than 60 degrees'
else:
isPrimitive = True
#The trapezoid has both base parallel to the Y axis
elif isY:
#Sort the vertices of the trapezoid
iA = tmp[:,0] == min(tmp[:,0])
iB = tmp[:,0] == max(tmp[:,0])
i1 = (tmp[:,1] == min(tmp[iA,1])) * iA
i2 = (tmp[:,1] == max(tmp[iA,1])) * iA
i3 = (tmp[:,1] == max(tmp[iB,1])) * iB
i4 = (tmp[:,1] == min(tmp[iB,1])) * iB
trap = np.array([tmp[i1],tmp[i2],tmp[i3],tmp[i4]],dtype=np.uint32).ravel()
theta1 = np.arctan(abs(int(trap[7])-int(trap[1]))/float(abs(int(trap[0])-int(trap[6]))))
theta2 = np.arctan(abs(int(trap[3])-int(trap[5]))/float(abs(int(trap[2])-int(trap[4]))))
if theta1 > np.pi/3 or theta2 > np.pi/3:
failLog = 'Y Trapezoid Theta1 or Theta2 cannot be larger than 60 degrees'
else:
isPrimitive = True
elif vertices.size == 6:
#The elements of vertices are [X1 Y1 X2 Y2 X3 Y3]
#Determine if the base of the triangle is along X or Y
tmp = np.append(vertices,vertices[0:2])
isX = sum(np.diff(tmp[1::2]) == 0) == 1
isY = sum(np.diff(tmp[0::2]) == 0) == 1
tmp = vertices.reshape(3,2)
if not isX and not isY:
failLog = 'One edge of the triangle must be parallel to either the X or Y axis'
#Right triangle
if isX and isY:
#Determine the base and height of a right triangle
w = max(vertices[0::2])-min(vertices[0::2])
h = max(vertices[1::2])-min(vertices[1::2])
#X right triangle
if h >= w:
#Sort the vertices
iA = tmp[:,1] == min(tmp[:,1])
iB = tmp[:,1] == max(tmp[:,1])
i1 = (tmp[:,0] == min(tmp[iA,0])) * iA
i2 = (tmp[:,0] == max(tmp[iA,0])) * iA
i3 = (tmp[:,0] == max(tmp[iB,0])) * iB
i4 = (tmp[:,0] == min(tmp[iB,0])) * iB
trap = np.array([tmp[i1],tmp[i2],tmp[i3],tmp[i4]],dtype=np.uint32).ravel()
theta1 = np.arctan(abs(int(trap[6])-int(trap[0]))/float(abs(int(trap[1])-int(trap[7]))))
theta2 = np.arctan(abs(int(trap[2])-int(trap[4]))/float(abs(int(trap[3])-int(trap[5]))))
if theta1 > np.pi/3 or theta2 > np.pi/3:
failLog = 'X Triangle Theta1 or Theta2 cannot be larger than 60 degrees'
else:
isPrimitive = True
#Y right triangle
else:
try:
#Sort the vertices
iA = tmp[:,0] == min(tmp[:,0])
iB = tmp[:,0] == max(tmp[:,0])
i1 = (tmp[:,1] == min(tmp[iA,1])) * iA
i2 = (tmp[:,1] == max(tmp[iA,1])) * iA
i3 = (tmp[:,1] == max(tmp[iB,1])) * iB
i4 = (tmp[:,1] == min(tmp[iB,1])) * iB
trap = np.array([tmp[i1],tmp[i2],tmp[i3],tmp[i4]],dtype=np.uint32).ravel()
theta1 = np.arctan(abs(int(trap[7])-int(trap[1]))/float(abs(int(trap[0])-int(trap[6]))))
theta2 = np.arctan(abs(int(trap[3])-int(trap[5]))/float(abs(int(trap[2])-int(trap[4]))))
if theta1 > np.pi/3 or theta2 > np.pi/3:
failLog = 'Y Triangle Theta1 or Theta2 cannot be larger than 60 degrees'
else:
isPrimitive = True
except:
failLog = 'Y Triangle is a Line'
#X triangle
elif isX:
#Sort the vertices
iA = tmp[:,1] == min(tmp[:,1])
iB = tmp[:,1] == max(tmp[:,1])
i1 = (tmp[:,0] == min(tmp[iA,0])) * iA
# File: check-oceanstor.py
import paramiko
import sys, argparse
import re
from argparse import RawTextHelpFormatter
if __name__ == "__main__":
# Exit code for nagios 0 -OK, 1 - Warning, 2 - Critical
exit_code = 0
output_info = ""
# OceanStor failed Health and Running status
failed_health_status = ["Offline", "Pre-fail", "Fault", "No Input", "--"]
failed_running_status = ["Offline", "Reconstruction", "Balancing", "--"]
def check_empty_response():
pass
def set_exit_code(code):
# If code is more critical than actual level set it
global exit_code
if code > exit_code:
exit_code = code
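# Editor's note: the helper only ever raises the severity, so once a CRITICAL
# (2) has been recorded a later set_exit_code(1) leaves exit_code at 2.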
def lslun():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show lun general')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
return "OK: There are no LUNs defined\n"
# Check if there are any critical LUNs
if not any( line.split()[4] in failed_health_status for line in ssh_lines ):
output_info += "OK: All LUNs Online \n"
else:
output_info += "CRITICAL: check your LUN status below \n"
set_exit_code(2)
for line in ssh_lines:
# Assign values
name, status = line.split()[1], line.split()[4]
# Check for errors
if status == "Normal":
output_info += "OK: LUN {} status: {}\n".format(name, status)
else:
output_info += "CRITICAL: LUN {} status: {}\n".format(name, status)
return output_info
def lsdisk():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show disk general')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
return "OK: There are no DISKs defined\n"
# Check if there are any critical DISKs
if not any( line.split()[1] in failed_health_status for line in ssh_lines ):
output_info += "OK: All DISKs Online and Healthy \n"
else:
output_info += "CRITICAL: check your DISK status below \n"
set_exit_code(2)
for line in ssh_lines:
# Assign values
slot, status, disk_type, capacity, role = line.split()[0], line.split()[1], line.split()[3], line.split()[4], line.split()[5]
# Check for errors
if status == "Normal":
output_info += "OK: DISK {} status: {}\n".format(slot, status)
else:
output_info += "CRITICAL: DISK {} status: {}\n role: {}\n type: {}\n capacity: {}\n".format(slot, status, role, disk_type, capacity)
return output_info
def lsdiskdomain():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show disk_domain general')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
return "OK: There are no DISK DOMAINs defined\n"
# Check if there are any critical DISK DOMAINs by Health status
if not any( line.split()[2] in failed_health_status for line in ssh_lines ):
output_info += "OK: All DISK DOMAINs Online \n"
else:
output_info += "CRITICAL: check your DISK DOMAIN status \n"
set_exit_code(2)
# Check if there are any critical DISK DOMAINs by Running status
if any( line.split()[3] in failed_running_status for line in ssh_lines ):
# Clear OK/Critical message set by Health Status, because Running is Critical
output_info = ""
output_info += "CRITICAL: Check your DISK DOMAIN status \n"
set_exit_code(2)
for line in ssh_lines:
# Assign values
name, health_status, running_status = line.split()[1], line.split()[2], line.split()[3]
# Check for errors in health status
if health_status == "Normal":
# Check for errors in running status
if running_status in failed_running_status:
output_info += "CRITICAL: DISK DOMAIN {} health status: {} running status: {}\n".format(name, health_status, running_status)
else:
output_info += "OK: DISK DOMAIN {} health status: {} running status: {}\n".format(name, health_status, running_status)
else:
output_info += "CRITICAL: DISK DOMAIN {} health status: {} running status: {}\n".format(name, health_status, running_status)
return output_info
def lsexpansionmodule():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show expansion_module')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
return "OK: There are no EXPANSION MODULEs defined\n"
# Check if there are any critical EXPANSION MODULEs by Health status
if not any( line.split()[1] in failed_health_status for line in ssh_lines ):
output_info += "OK: All EXPANSION MODULEs Online \n"
else:
output_info += "CRITICAL: check your EXPANSION MODULEs status \n"
set_exit_code(2)
# Check if there are any critical EXPANSION MODULEs by Running status
if any( line.split()[2] in failed_running_status for line in ssh_lines ):
# Clear OK/Critical message set by Health Status, because Running is Critical
output_info = ""
output_info += "CRITICAL: Check your EXPANSION MODULEs status \n"
set_exit_code(2)
for line in ssh_lines:
# Assign values
expansion_id, health_status, running_status = line.split()[0], line.split()[1], line.split()[2]
# Check for errors in health status
if health_status == "Normal":
# Check for errors in running status
if running_status in failed_running_status:
output_info += "CRITICAL: EXPANSION MODULE {} health status: {} running status: {}\n".format(expansion_id, health_status, running_status)
else:
output_info += "OK: EXPANSION MODULE {} health status: {} running status: {}\n".format(expansion_id, health_status, running_status)
else:
output_info += "CRITICAL: EXPANSION MODULE {} health status: {} running status: {}\n".format(expansion_id, health_status, running_status)
return output_info
def lsinitiator():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show initiator')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
return "OK: There are no INITIATORs defined\n"
# Check if there are any critical INITIATORs
if not any( line.split()[1] == "Offline" for line in ssh_lines ):
output_info += "OK: All INITIATORs Online \n"
else:
output_info += "WARNING: INITIATOR OFFLINE \n"
set_exit_code(1)
for line in ssh_lines:
# Assign values
name, status = line.split()[0], line.split()[1]
# Check for errors
if status == "Online":
output_info += "OK: INITIATOR {} status: {}\n".format(name, status)
else:
output_info += "WARNING: INITIATOR {} status: {}\n".format(name, status)
return output_info
def lsstoragepool():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show storage_pool general')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
return "OK: There are no STORAGE POOLs defined\n"
# Check if there are any critical STORAGE POOLs by Health status
if not any( line.split()[3] in failed_health_status for line in ssh_lines ):
output_info += "OK: All STORAGE POOLs Online \n"
else:
output_info += "CRITICAL: Check your STORAGE POOL status \n"
set_exit_code(2)
# Check if there are any critical STORAGE POOLs by Running status
if any( line.split()[4] in failed_running_status for line in ssh_lines ):
# Clear OK/Critical message set by Health Status, because Running is Critical
output_info = ""
output_info += "CRITICAL: Check your STORAGE POOL status \n"
set_exit_code(2)
for line in ssh_lines:
# Assign values
name, health_status, running_status = line.split()[1], line.split()[3], line.split()[4]
# Check for errors
if running_status == "Online":
output_info += "OK: STORAGE POOL {} health status: {} running status: {}\n".format(name, health_status, running_status)
else:
output_info += "CRITICAL: STORAGE POOL {} health status: {} running status: {}\n".format(name, health_status, running_status)
return output_info
def lspsu():
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('show power_supply')
ssh_lines = ssh_stdout.readlines()[4:]
output_info = ""
# return if there are no entries on storage system
if len(ssh_lines) == 0:
output_info += "CRITICAL: No PSUs were found \n"
set_exit_code(2)
# Check if there are any critical STORAGE POOLs
if not any( [x for x in re.split(r"\s{2,}",line) if x][2] in failed_running_status for line in ssh_lines ):
output_info += "OK: All PSU Online \n"
else:
output_info += "CRITICAL: Check your PSU status \n"
set_exit_code(2)
for line in ssh_lines:
# split string per double spaces because of status "No Input" at 2nd column
split_line = [x for x in re.split(r"\s{2,}",line) if x]
# Assign values
name, health_status, running_status = split_line[0], split_line[1], split_line[2]
# Check for errors
if running_status == "Online":
output_info += "OK: PSU {} health status: {} running status: {}\n".format(name, health_status, running_status)
else:
output_info += "CRITICAL: PSU {} health status: {} running status: {}\n".format(name, health_status, running_status)
return output_info
def lsallstatuses():
global output_info
output_info += lslun() + "\n"
output_info += lsdisk() + "\n"
output_info += lsdiskdomain() + "\n"
output_info += lsexpansionmodule() + "\n"
output_info += lsinitiator() + "\n"
output_info += lsstoragepool() + "\n"
output_info += lspsu() + "\n"
return output_info
def switcher_function(command):
"""Function returns output information for nagios."""
switcher = {
"lslun": lslun,
"lsdisk": lsdisk,
"lsdiskdomain": lsdiskdomain,
"lsexpansionmodule": lsexpansionmodule,
"lsinitiator": lsinitiator,
"lsstoragepool": lsstoragepool,
"lspsu": lspsu,
"lsallstatuses": lsallstatuses,
}
return switcher.get(command)()
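# Example (editor's sketch): dispatching a single check by name. An unknown
# command would raise a TypeError because switcher.get(command) returns None.
#   print(switcher_function("lsdisk"))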
help_message = ("""Check Huawei Oceanstor through SSH
Useable commands:
lslun - show lun general
# Source: ctoth/owyl, examples/boids.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""boids -- Boids implementation using Owyl behavior trees.
This module provides example code using the L{owyl} library to
implement the Boids flocking algorithm.
Requirements
============
Note: this demo requires Pyglet, Rabbyt, cocos2d
- B{Pyglet}: U{http://pypi.python.org/pypi/pyglet}
- B{Rabbyt}: U{http://pypi.python.org/pypi/Rabbyt}
- B{cocos}: U{http://cocos2d.org/}
Intent
======
This example demonstrates the basic usage of Owyl, including:
- building and running a Behavior Tree, and
- developing custom behaviors.
Definitions
===========
- B{behavior}: Any unit of a Behavior Tree, as represented by a task
node, branch, or group of parallel behaviors.
- B{task node}: Any atomic Behavior Tree node.
- B{parent node}/B{parent task}: Any task node that has child nodes.
- B{branch}: A parent node and all its children.
- B{node decorator}: A parent node with only one child. Used to add
functionality to a child.
- B{leaf node}/B{leaf task}/B{leaf}: A task node that has no children.
Algorithm
=========
The basic Boids flocking algorithm was developed by Craig
Reynolds. For more information, see his page at
U{http://www.red3d.com/cwr/boids/}.
It's a very simple algorithm, with three basic behaviors:
- "B{Separation}: steer to avoid crowding local flockmates"
- "B{Alignment}: steer towards the average heading of local flockmates"
- "B{Cohesion}: steer to move toward the average position of local
flockmates"
I{(Definitions from <NAME>, linked above)}
This is actually so simple, we wouldn't really need a behavior tree
to model it, but it's a good place to start.
Just to spice things up, we've added some extra behavior: boids will
accelerate as they steer away from too-close flock mates, and they
will seek to match a global speed. This gives the flock more the
appearance of a school of fish, rather than a flight of sparrows, but
it will let us break out some slightly more advanced behaviors.
The boids will also seek after a fixed point (conveniently, the center
of the screen), so that we can observe their movement better.
Building the Tree
=================
See L{Boid.buildTree} below.
Core Behaviors
==============
The core behaviors are documented below in each task nodes'
docstring. They are:
- L{Boid.hasCloseNeighbors}: conditional to detect crowding
- L{Boid.accelerate}: accelerate at a given rate
- L{Boid.matchSpeed}: accelerate to match a given speed
- L{Boid.move}: move straight ahead at current speed
- L{Boid.seek}: seek a fixed goal position
- L{Boid.steerToMatchHeading}: match neighbors' average heading
- L{Boid.steerForSeparation}: steer away from close flockmates
- L{Boid.steerForCohesion}: steer toward average position of neighbors.
Helpers
=======
A number of other helper methods clutter up the namespace. Boid also
inherits from L{steering.Steerable<examples.steering.Steerable>},
which contains common steering helper methods which will be useful in
future examples.
Other Stuff
===========
Copyright 2008 <NAME>. All rights reserved.
$Author$\n
$Rev$\n
$Date$
@newfield blackboard: Blackboard data
"""
__author__ = "$Author$"[9:-2]
__revision__ = "$Rev$"[6:-2]
__date__ = "$Date$"[7:-2]
import os
import random
from math import radians, degrees, sin, cos, pi, atan2
pi_2 = pi*2.0
pi_1_2 = pi/2.0
pi_1_4 = pi/4.0
pi_3_4 = (pi*3)/4
### Optimized attribute getters for sprites..
from operator import attrgetter
getX = attrgetter('x')
getY = attrgetter('y')
getR = attrgetter('rotation')
### Memojito provides memoization (caching) services.
import memojito
### Pyglet provides graphics and resource management.
import pyglet
pyglet.resource.path = [os.path.dirname(os.path.abspath(__file__)),]
pyglet.resource.reindex()
## Cocos provides scene direction and composition
from cocos.director import director
from cocos.scene import Scene
from cocos.actions import FadeIn
from cocos.layer import ScrollableLayer, ScrollingManager
## Rabbyt provides collision detection
from rabbyt.collisions import collide_single
## Owyl provides the wisdom
from owyl import blackboard
import owyl
from steering import Steerable
class Boid(Steerable):
"""Implement a member of a flock.
Boid implements its leaf node behaviors as methods, using the
L{owyl.taskmethod} decorator. Leaf node behaviors may also be
implemented as unbound functions using the L{owyl.task}
decorators.
The boid's behavior tree is built in the L{Boid.buildTree} method,
below.
"""
_img = pyglet.resource.image('triangle_yellow.png')
_img.anchor_x = _img.width / 2
_img.anchor_y = _img.height / 2
boids = []
def __init__(self, blackboard):
super(Boid, self).__init__(self._img)
self.scale = 0.05
self.schedule(self.update)
self.bb = blackboard
self.boids.append(self)
self.opacity = 0
self.do(FadeIn(2))
self.speed = 200
self.bounding_radius = 5
self.bounding_radius_squared = 25
self.neighborhood_radius = 1000
self.personal_radius = 20
self.tree = self.buildTree()
def buildTree(self):
"""Build the behavior tree.
Building the behavior tree is as simple as nesting the
behavior constructor calls.
Building the Behavior Tree
==========================
We'll use a L{parallel<owyl.core.parallel>} parent node as
the root of our tree. Parallel is essentially a round-robin
scheduler. That is, it will run one step on each its children
sequentially, so that the children execute parallel to each
other. Parallel is useful as a root behavior when we want
multiple behaviors to run at the same time, as with Boids.
The first call to a task node constructor returns another
function. Calling I{that} function will return an iterable
generator. (This behavior is provided by the "@task..."
family of python decorators found in L{owyl.core}.)
Generally, you won't have to worry about this unless you're
writing new parent nodes, but keep it in mind.
Also note that keyword arguments can be provided at
construction time (call to task constructor) or at run-time
(call to visit). The C{blackboard} keyword argument to
C{visit} will be available to the entire tree. (This is also
why all nodes should accept C{**kwargs}-style keyword
arguments, and access shared data through the blackboard.)
Skipping down to the end of the tree definition, we see the
first use of
L{visit<owyl.core.visit>}. L{visit<owyl.core.visit>} provides
the external iterator interface to the tree. Technically,
it's an implementation of the Visitor pattern. It visits each
"node" of the behavior tree and iterates over it, descending
into children as determined by the logic of the parent
nodes. (In AI terminology, this is a depth-first search, but
with the search logic embedded in the tree.)
L{visit<owyl.core.visit>} is also used internally by several
parent behaviors, including L{parallel<owyl.core.parallel>},
L{limit<owyl.decorators.limit>}, and
L{repeatAlways<owyl.decorators.repeatAlways>} in order to
gain more control over its children.
L{limit<owyl.decorators.limit>}
===============================
The next parent node we see is
L{limit<owyl.decorators.limit>}. L{limit<owyl.decorators.limit>}
is a decorator node designed to limit how often its child is
run (given by the keyword argument C{limit_period} in
seconds). This is useful for limiting the execution of
expensive tasks.
In the example below, we're using
L{limit<owyl.decorators.limit>} to clear memoes once every
0.4 seconds. This implementation of Boids uses
L{memojito<examples.memojito>} to cache (or "memoize")
neighbor data for each Boid. Neighbor data is used by each of
the core behaviors, and is fairly expensive to
calculate. However, it's constantly changing, so adjusting
the limit_period will affect the behavior of the flock (and
the frame rate).
L{repeatAlways<owyl.decorators.repeatAlways>}
=============================================
We next see the L{repeatAlways<owyl.decorators.repeatAlways>}
decorator node. This does exactly as you might expect: it
takes a behavior that might only run once, and repeats it
perpetually, ignoring return values and always yielding None
(the special code for "I'm not done yet, give me another
chance to run").
L{sequence<owyl.decorators.sequence>}
=============================================
Runs a sequence of actions. If any action yields False,
then the rest of the sequence is not executed (the sequence
is halted). Otherwise, the next sequence item is run. In
this example, a boid accelerates away only if it is too close
to another boid.
Core Behaviors
==============
The core behaviors are documented below in each method's
docstring. They are:
- L{Boid.hasCloseNeighbors}: conditional to detect crowding
- L{Boid.accelerate}: accelerate at a given rate
- L{Boid.matchSpeed}: accelerate to match a given speed
- L{Boid.move}: move straight ahead at current speed
- L{Boid.seek}: seek a fixed goal position
- L{Boid.steerToMatchHeading}: match neighbors' average
heading
- L{Boid.steerForSeparation}: steer away from close
flockmates
- L{Boid.steerForCohesion}: steer toward average position of
neighbors.
"""
tree = owyl.parallel(
owyl.limit(
owyl.repeatAlways(self.clearMemoes(), debug=True),
limit_period=0.4),
### Velocity and Acceleration
#############################
owyl.repeatAlways(owyl.sequence(self.hasCloseNeighbors(),
self.accelerate(rate=-.01),
),
),
self.move(),
self.matchSpeed(match_speed=300, rate=.01),
### Steering
############
self.seek(goal=(0, 0), rate=5),
self.steerToMatchHeading(rate=2),
self.steerForSeparation(rate=5),
self.steerForCohesion(rate=2),
policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL
)
return owyl.visit(tree, blackboard=self.bb)
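# Example (editor's sketch, not part of the original file): a minimal tree and
# the external iteration interface described above. Each step of the iterator
# returned by owyl.visit advances the tree by (roughly) one scheduling pass.
#   mini = owyl.sequence(self.hasCloseNeighbors(),
#                        self.accelerate(rate=-.01))
#   stepper = owyl.visit(mini, blackboard=self.bb)
#   # stepper.next() (or next(stepper)) runs one step of the behaviour.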
@owyl.taskmethod
def hasCloseNeighbors(self, **kwargs):
"""Check to see if we have close neighbors.
"""
yield bool(self.closest_neighbors)
@owyl.taskmethod
def accelerate(self, **kwargs):
"""accelerate
@keyword rate: The rate of acceleration (+ or -)
"""
bb = kwargs['blackboard']
rate = kwargs['rate']
dt = bb['dt']
self.speed = max(self.speed + rate * dt, 0)
yield True
@owyl.taskmethod
def matchSpeed(self, **kwargs):
"""Accelerate to match the given speed.
@keyword blackboard: A shared blackboard.
@keyword match_speed: The speed to match.
@keyword rate: The rate of acceleration.
"""
bb = kwargs['blackboard']
| |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import annotations
import datetime
import json
import logging
import re
import shutil
import tempfile
import time
from collections import Counter
from functools import cached_property, lru_cache
from pathlib import Path
from typing import Any, Callable, cast, Dict, Iterator, List, Tuple, Union
import boto3
import numpy as np
from botocore.exceptions import ClientError
from .session import default_session
# -------------------------------------------------------------------------------------------------
class Artifact:
"""
An artifact manages an untarred model artifact of a training job. More precisely, it manages
a local temporary directory which contains all files stored as artifacts.
The artifact ought to be used within a `with` statement. Upon exit, the directory is
cleaned up unless it belongs to the local artifact cache (see `TrainingJob.artifact`).
Attributes:
path: The path of the artifact's managed directory.
"""
def __init__(self, path: Path, cleanup: bool):
"""
Initializes a new artifact in the specified directory.
**Note: Do not call this initializer yourself. Instances are returned when accessing the
artifacts of a training job.**
"""
self.path = path
self.cleanup = cleanup
def __enter__(self) -> Artifact:
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
if self.cleanup:
shutil.rmtree(self.path)
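# Usage sketch (hypothetical; `job` would be a TrainingJob obtained via an Analysis below):
#
#     with job.artifact(cache=False) as artifact:
#         files = list(artifact.path.iterdir())
#
# With `cache=False` the artifact is backed by a temporary directory and is
# removed again when the `with` block exits.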
# -------------------------------------------------------------------------------------------------
class TrainingJob:
"""
A training job represents a Sagemaker training job within an experiment.
"""
def __init__(self, info: Any):
"""
Initializes a new training job from the given training job description returned by the
SageMaker API.
**Note: This method should only be called in the context of an Analysis object. Do not use
this initializer yourself.**
"""
self.info = info
@property
def name(self) -> str:
"""
Returns the name of the training job.
"""
return self.info["TrainingJobName"]
@property
def status(self) -> str:
"""
Returns the status of the training job.
"""
return self.info["TrainingJobStatus"]
@property
def date_created(self) -> datetime.datetime:
"""
Returns the date and time when the training job was created.
"""
return self.info["CreationTime"]
@property
def hyperparameters(self) -> Dict[str, Any]:
"""
Returns all user-defined hyperparameters (keys added by SageMaker itself are filtered out).
"""
return {
k: _process_hyperparameter_value(v)
for k, v in self.info["HyperParameters"].items()
if not k.startswith("sagemaker_")
and not k.endswith("_output_distribution")
}
@lru_cache()
def pull_logs(self) -> List[str]:
"""
Pulls the training job's logs and caches them locally so that subsequent accesses to the
`logs` property are served from the cache.
"""
# Check if the logs are already available locally
log_file = self._cache_dir() / "logs.txt"
if log_file.exists():
with log_file.open("r") as f:
return f.read().split("\n")
# If not, fetch them
client = default_session().client("logs")
streams = client.describe_log_streams(
logGroupName="/aws/sagemaker/TrainingJobs",
logStreamNamePrefix=self.info["TrainingJobName"],
)
res = []
for stream in streams["logStreams"]:
params = {
"logGroupName": "/aws/sagemaker/TrainingJobs",
"logStreamName": stream["logStreamName"],
"startFromHead": True,
}
result = client.get_log_events(**params)
res.extend([event["message"] for event in result["events"]])
while "nextForwardToken" in result:
next_token = result["nextForwardToken"]
result = client.get_log_events(nextToken=next_token, **params)
if result["nextForwardToken"] == next_token:
# The same token as before indicates end of stream, see
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.get_log_events
break
res.extend([event["message"] for event in result["events"]])
# Store them
log_file.parent.mkdir(parents=True, exist_ok=True)
with log_file.open("w") as f:
f.write("\n".join(res))
# And return them
return res
@property
def logs(self) -> List[str]:
"""
Retrieves the logs emitted by this training job.
"""
# We can't put the `pull_logs` code here directly since `cached_property` seems to be CPU-
# bound for some odd reason.
return self.pull_logs()
@cached_property
def metrics(self) -> Dict[str, np.ndarray]:
"""
Fetches the metrics defined by the training script from the training job's logs. For each
metric, it returns a 1D NumPy array (ordered chronologically).
"""
# Check if the logs are already available locally
metrics_file = self._cache_dir() / "metrics.json"
if metrics_file.exists():
with metrics_file.open("r") as f:
return {
k: np.array(v, dtype=np.float32)
for k, v in json.load(f).items()
}
# If not, get them from the logs, write them to the file system and return
metrics = {
metric["Name"]: [
float(x)
for x in re.findall(metric["Regex"], "\n".join(self.logs))
]
for metric in self.info["AlgorithmSpecification"][
"MetricDefinitions"
]
}
with metrics_file.open("w+") as f:
json.dump(metrics, f)
# Return them as numpy arrays
return {k: np.array(v, dtype=np.float32) for k, v in metrics.items()}
def artifact(self, cache: bool = True) -> Artifact:
"""
Retrieves the model artifact from S3 and stores it locally in a temporary directory.
Args:
cache: Whether to cache the extracted artifact.
Returns:
The artifact which contains the untarred model artifact directory. The artifact should
be wrapped in a `with` statement such that the directory is cleaned up after usage.
"""
cache_dir = self._cache_dir() / "artifacts"
# First, we check whether the model is already available locally. For this, the `cache`
# flag is irrelevant
if cache_dir.exists():
return Artifact(cache_dir, cleanup=False)
# If not, we need to download the artifact. For that, we need to get the bucket and object
# path
regex = r"^s3://([A-z0-9-_]*)/(.*)$"
bucket_name, object_path = re.findall(
regex, self.info["ModelArtifacts"]["S3ModelArtifacts"]
)[0]
# Then, we can download the model
s3 = default_session().client("s3")
with tempfile.NamedTemporaryFile(suffix=".tar.gz") as tmp:
s3.download_fileobj(bucket_name, object_path, tmp)
tmp.seek(0)
# As soon as it is downloaded, we can unpack the tar into the cache directory or a
# temporary one
if cache:
cache_dir.mkdir(exist_ok=True, parents=True)
target = cache_dir
else:
target = Path(tempfile.mkdtemp())
shutil.unpack_archive(tmp.name, target)
# And return the artifact
return Artifact(target, cleanup=not cache)
def move_to(self, experiment: str) -> None:
"""
Updates the experiment tag to the provided name.
"""
client = default_session().client("sagemaker")
client.add_tags(
ResourceArn=self.info["TrainingJobArn"],
Tags=[{"Key": "Experiment", "Value": experiment}],
)
def delete(self) -> None:
"""
Removes the training job from its experiment by deleting its `Experiment` tag. The original
experiment name is preserved in an `OriginalExperiment` tag.
"""
client = default_session().client("sagemaker")
existing_tags = client.list_tags(
ResourceArn=self.info["TrainingJobArn"],
MaxResults=100,
)
experiment = [
t["Value"]
for t in existing_tags["Tags"]
if t["Key"] == "Experiment"
][0]
client.add_tags(
ResourceArn=self.info["TrainingJobArn"],
Tags=[{"Key": "OriginalExperiment", "Value": experiment}],
)
client.delete_tags(
ResourceArn=self.info["TrainingJobArn"],
TagKeys=["Experiment"],
)
def __repr__(self) -> str:
return f"TrainingJob(name={self.info['TrainingJobName']})"
def _cache_dir(self) -> Path:
return (
Path.home()
/ "tsbench"
/ "cache"
/ cast(str, self.info["TrainingJobName"])
)
# -------------------------------------------------------------------------------------------------
class Analysis:
"""
The analysis object allows analyzing a set of training jobs that belong to the same experiment.
"""
def __init__(
self,
experiment: str,
only_completed: bool = True,
include: Callable[[TrainingJob], bool] = lambda _: True,
resolve_duplicates: bool = True,
):
"""
Initializes a new analysis object, using the default boto3 session to make requests to AWS
and Sagemaker. The initializer already fetches all training jobs belonging to the provided
experiment.
Args:
experiment: The name of the experiment to analyze.
only_completed: Whether to ignore runs which have not completed successfully (a
warning will be emitted nonetheless).
include: Whether the training job should be included in the summary. By default, it
returns True for any job. If `only_completed` is set to True, only completed jobs
will be passed to this callback.
resolve_duplicates: Whether to exclude the older experiments if experiments with the
same hyperparameters are found.
"""
self.experiment_name = experiment
training_jobs, duplicates = _fetch_training_jobs(
default_session(),
self.experiment_name,
only_completed,
resolve_duplicates,
)
self.duplicates = duplicates
self.map = {t.name: t for t in training_jobs if include(t)}
if len(self.map) < len(training_jobs):
logging.warning(
" Analysis manually excludes %d jobs",
len(training_jobs) - len(self.map),
)
def get(self, name: str) -> TrainingJob:
"""
Returns the training job with the specified name.
"""
return self.map[name]
@property
def status(self) -> Dict[str, int]:
"""
Returns the aggregate statistics about the status of all jobs.
"""
c = Counter([t.status for t in self.map.values()])
return dict(c)
def __iter__(self) -> Iterator[TrainingJob]:
return iter(self.map.values())
def __len__(self) -> int:
return len(self.map)
def __repr__(self) -> str:
return f"Analysis(experiment='{self.experiment_name}', num_jobs={len(self):,})"
# -------------------------------------------------------------------------------------------------
def _fetch_training_jobs(
session: boto3.Session,
experiment: str,
only_completed: bool,
resolve_duplicates: bool,
) -> Tuple[List[TrainingJob], List[TrainingJob]]:
"""
Fetches all training jobs which are associated with this experiment.
"""
client = session.client("sagemaker")
search_params = {
"MaxResults": 100,
"Resource": "TrainingJob",
"SearchExpression": {
"Filters": [
{
"Name": "Tags.Experiment",
"Operator": "Equals",
"Value": experiment,
}
],
},
}
while True:
try:
response = client.search(**search_params)
break
except ClientError:
time.sleep(1)
results = response["Results"]
while "NextToken" in response:
while True:
try:
response = client.search(
NextToken=response["NextToken"], **search_params
)
results.extend(response["Results"])
break
except ClientError:
"""Kea subnet-id sanity-check"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_control
import srv_msg
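# Common pattern of the tests below: configure a subnet with one subnet-id, hand out a
# lease, restart (or reconfigure) Kea with a different subnet-id and one of the
# "sanity-checks"/"lease-checks" modes (fix, fix-del, warn, del), then verify the
# DHCPSRV_LEASE_SANITY_* log messages and the resulting lease file contents.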
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_able():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_leases('logs')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FIXED The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks, but was corrected to subnet-id 999.')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_able_double_restart():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_leases('logs')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FIXED The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks, but was corrected to subnet-id 999.')
srv_msg.forge_sleep('13', 'seconds')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '987654321')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
# Pause the Test.
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_unable():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_leases('logs')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_del_unable():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_leases('logs')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_del_able():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'reconfigured')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_warn():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"warn"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"warn"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'reconfigured')
srv_msg.forge_sleep('2', 'seconds')
srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FAIL The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks.')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:33')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '13')
srv_msg.response_check_suboption_content('Response', '13', '3', None, 'statuscode', '2')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_del_renew():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_leases('logs')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
srv_msg.forge_sleep('2', 'seconds')
srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FAIL_DISCARD The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks and was dropped.')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '13')
srv_msg.response_check_suboption_content('Response', '13', '3', None, 'statuscode', '2')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('999,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_msg.lease_file_doesnt_contain('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.lease_file_doesnt_contain('999,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:22')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_del():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_leases('logs')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '999', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
srv_msg.forge_sleep('2', 'seconds')
srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FAIL_DISCARD The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks and was dropped.')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '7654321')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-get","arguments":{"ip-address": "2001:db8::1"}}')
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-get","arguments":{"subnet-id":666,"identifier-type":"duid", "identifier": "00:03:00:01:f6:f5:f4:f3:f2:01"}}')
srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
# Pause the Test.
,'p2':320011})
create_and_post({'t':'line' ,'p1':320011 ,'p2':310011})
create_and_post({'t':'line' ,'p1':310011 ,'p2':300010})
create_and_post({'t':'line' ,'p1':230003 ,'p2':240003})
create_and_post({'t':'line' ,'p1':210005 ,'p2':210006})
create_and_post({'t':'line' ,'p1':260005 ,'p2':260006})
create_and_post({'t':'line' ,'p1':230008 ,'p2':240008})
create_and_post({'t':'line' ,'p1':410003 ,'p2':420003})
create_and_post({'t':'line' ,'p1':390005 ,'p2':390006})
create_and_post({'t':'line' ,'p1':440005 ,'p2':440006})
create_and_post({'t':'line' ,'p1':410008 ,'p2':420008})
create_and_post({'t':'line' ,'p1':360000 ,'p2':380000})
create_and_post({'t':'line' ,'p1':380000 ,'p2':390000})
create_and_post({'t':'line' ,'p1':390000 ,'p2':400000})
create_and_post({'t':'line' ,'p1':360000 ,'p2':360002})
create_and_post({'t':'line' ,'p1':360002 ,'p2':360003})
create_and_post({'t':'line' ,'p1':360003 ,'p2':360004})
create_and_post({'t':'line' ,'p1':360004 ,'p2':400004})
create_and_post({'t':'line' ,'p1':400004 ,'p2':400000})
create_and_post({'t':'line' ,'p1':400000 ,'p2':410001})
create_and_post({'t':'line' ,'p1':410001 ,'p2':410002})
create_and_post({'t':'line' ,'p1':410002 ,'p2':410003})
create_and_post({'t':'line' ,'p1':410003 ,'p2':410005})
create_and_post({'t':'line' ,'p1':410005 ,'p2':400004})
create_and_post({'t':'line' ,'p1':410005 ,'p2':390005})
create_and_post({'t':'line' ,'p1':390005 ,'p2':380005})
create_and_post({'t':'line' ,'p1':380005 ,'p2':370005})
create_and_post({'t':'line' ,'p1':370005 ,'p2':360004})
create_and_post({'t':'line' ,'p1':420000 ,'p2':440000})
create_and_post({'t':'line' ,'p1':440000 ,'p2':450000})
create_and_post({'t':'line' ,'p1':450000 ,'p2':460000})
create_and_post({'t':'line' ,'p1':420000 ,'p2':420002})
create_and_post({'t':'line' ,'p1':420002 ,'p2':420003})
create_and_post({'t':'line' ,'p1':420003 ,'p2':420004})
create_and_post({'t':'line' ,'p1':420004 ,'p2':460004})
create_and_post({'t':'line' ,'p1':460004 ,'p2':460000})
create_and_post({'t':'line' ,'p1':460000 ,'p2':470001})
create_and_post({'t':'line' ,'p1':470001 ,'p2':470002})
create_and_post({'t':'line' ,'p1':470002 ,'p2':470003})
create_and_post({'t':'line' ,'p1':470003 ,'p2':470005})
create_and_post({'t':'line' ,'p1':470005 ,'p2':460004})
create_and_post({'t':'line' ,'p1':470005 ,'p2':450005})
create_and_post({'t':'line' ,'p1':450005 ,'p2':440005})
create_and_post({'t':'line' ,'p1':440005 ,'p2':430005})
create_and_post({'t':'line' ,'p1':430005 ,'p2':420004})
create_and_post({'t':'line' ,'p1':360006 ,'p2':380006})
create_and_post({'t':'line' ,'p1':380006 ,'p2':390006})
create_and_post({'t':'line' ,'p1':390006 ,'p2':400006})
create_and_post({'t':'line' ,'p1':360006 ,'p2':360008})
create_and_post({'t':'line' ,'p1':360008 ,'p2':360009})
create_and_post({'t':'line' ,'p1':360009 ,'p2':360010})
create_and_post({'t':'line' ,'p1':360010 ,'p2':400010})
create_and_post({'t':'line' ,'p1':400010 ,'p2':400006})
create_and_post({'t':'line' ,'p1':400006 ,'p2':410007})
create_and_post({'t':'line' ,'p1':410007 ,'p2':410008})
create_and_post({'t':'line' ,'p1':410008 ,'p2':410009})
create_and_post({'t':'line' ,'p1':410009 ,'p2':410011})
create_and_post({'t':'line' ,'p1':410011 ,'p2':400010})
create_and_post({'t':'line' ,'p1':410011 ,'p2':390011})
create_and_post({'t':'line' ,'p1':390011 ,'p2':380011})
create_and_post({'t':'line' ,'p1':380011 ,'p2':370011})
create_and_post({'t':'line' ,'p1':370011 ,'p2':360010})
create_and_post({'t':'line' ,'p1':420006 ,'p2':440006})
create_and_post({'t':'line' ,'p1':440006 ,'p2':450006})
create_and_post({'t':'line' ,'p1':450006 ,'p2':460006})
create_and_post({'t':'line' ,'p1':420006 ,'p2':420008})
create_and_post({'t':'line' ,'p1':420008 ,'p2':420009})
create_and_post({'t':'line' ,'p1':420009 ,'p2':420010})
create_and_post({'t':'line' ,'p1':420010 ,'p2':460010})
create_and_post({'t':'line' ,'p1':460010 ,'p2':460006})
create_and_post({'t':'line' ,'p1':460006 ,'p2':470007})
create_and_post({'t':'line' ,'p1':470007 ,'p2':470008})
create_and_post({'t':'line' ,'p1':470008 ,'p2':470009})
create_and_post({'t':'line' ,'p1':470009 ,'p2':470011})
create_and_post({'t':'line' ,'p1':470011 ,'p2':460010})
create_and_post({'t':'line' ,'p1':470011 ,'p2':450011})
create_and_post({'t':'line' ,'p1':450011 ,'p2':440011})
create_and_post({'t':'line' ,'p1':440011 ,'p2':430011})
create_and_post({'t':'line' ,'p1':430011 ,'p2':420010})
create_and_post({'t':'line' ,'p1':350003 ,'p2':360003})
create_and_post({'t':'line' ,'p1':330005 ,'p2':330006})
create_and_post({'t':'line' ,'p1':380005 ,'p2':380006})
create_and_post({'t':'line' ,'p1':350008 ,'p2':360008})
create_and_post({'t':'line' ,'p1':530003 ,'p2':540003})
create_and_post({'t':'line' ,'p1':510005 ,'p2':510006})
create_and_post({'t':'line' ,'p1':560005 ,'p2':560006})
create_and_post({'t':'line' ,'p1':530008 ,'p2':540008})
create_and_post({'t':'line' ,'p1':480000 ,'p2':500000})
create_and_post({'t':'line' ,'p1':500000 ,'p2':510000})
create_and_post({'t':'line' ,'p1':510000 ,'p2':520000})
create_and_post({'t':'line' ,'p1':480000 ,'p2':480002})
create_and_post({'t':'line' ,'p1':480002 ,'p2':480003})
create_and_post({'t':'line' ,'p1':480003 ,'p2':480004})
create_and_post({'t':'line' ,'p1':480004 ,'p2':520004})
create_and_post({'t':'line' ,'p1':520004 ,'p2':520000})
create_and_post({'t':'line' ,'p1':520000 ,'p2':530001})
create_and_post({'t':'line' ,'p1':530001 ,'p2':530002})
create_and_post({'t':'line' ,'p1':530002 ,'p2':530003})
create_and_post({'t':'line' ,'p1':530003 ,'p2':530005})
create_and_post({'t':'line' ,'p1':530005 ,'p2':520004})
create_and_post({'t':'line' ,'p1':530005 ,'p2':510005})
create_and_post({'t':'line' ,'p1':510005 ,'p2':500005})
create_and_post({'t':'line' ,'p1':500005 ,'p2':490005})
create_and_post({'t':'line' ,'p1':490005 ,'p2':480004})
create_and_post({'t':'line' ,'p1':540000 ,'p2':560000})
create_and_post({'t':'line' ,'p1':560000 ,'p2':570000})
create_and_post({'t':'line' ,'p1':570000 ,'p2':580000})
create_and_post({'t':'line' ,'p1':540000 ,'p2':540002})
create_and_post({'t':'line' ,'p1':540002 ,'p2':540003})
create_and_post({'t':'line' ,'p1':540003 ,'p2':540004})
create_and_post({'t':'line' ,'p1':540004 ,'p2':580004})
create_and_post({'t':'line' ,'p1':580004 ,'p2':580000})
create_and_post({'t':'line' ,'p1':580000 ,'p2':590001})
create_and_post({'t':'line' ,'p1':590001 ,'p2':590002})
create_and_post({'t':'line' ,'p1':590002 ,'p2':590003})
create_and_post({'t':'line' ,'p1':590003 ,'p2':590005})
create_and_post({'t':'line' ,'p1':590005 ,'p2':580004})
create_and_post({'t':'line' ,'p1':590005 ,'p2':570005})
create_and_post({'t':'line' ,'p1':570005 ,'p2':560005})
create_and_post({'t':'line' ,'p1':560005 ,'p2':550005})
create_and_post({'t':'line' ,'p1':550005 ,'p2':540004})
create_and_post({'t':'line' ,'p1':480006 ,'p2':500006})
create_and_post({'t':'line' ,'p1':500006 ,'p2':510006})
create_and_post({'t':'line' ,'p1':510006 ,'p2':520006})
create_and_post({'t':'line' ,'p1':480006 ,'p2':480008})
create_and_post({'t':'line' ,'p1':480008 ,'p2':480009})
create_and_post({'t':'line' ,'p1':480009 ,'p2':480010})
create_and_post({'t':'line' ,'p1':480010 ,'p2':520010})
create_and_post({'t':'line' ,'p1':520010 ,'p2':520006})
create_and_post({'t':'line' ,'p1':520006 ,'p2':530007})
create_and_post({'t':'line' ,'p1':530007 ,'p2':530008})
create_and_post({'t':'line' ,'p1':530008 ,'p2':530009})
create_and_post({'t':'line' ,'p1':530009 ,'p2':530011})
create_and_post({'t':'line' ,'p1':530011 ,'p2':520010})
create_and_post({'t':'line' ,'p1':530011 ,'p2':510011})
create_and_post({'t':'line' ,'p1':510011 ,'p2':500011})
create_and_post({'t':'line' ,'p1':500011 ,'p2':490011})
create_and_post({'t':'line' ,'p1':490011 ,'p2':480010})
create_and_post({'t':'line' ,'p1':540006 ,'p2':560006})
create_and_post({'t':'line' ,'p1':560006 ,'p2':570006})
create_and_post({'t':'line' ,'p1':570006 ,'p2':580006})
create_and_post({'t':'line' ,'p1':540006 ,'p2':540008})
create_and_post({'t':'line' ,'p1':540008 ,'p2':540009})
create_and_post({'t':'line' ,'p1':540009 ,'p2':540010})
create_and_post({'t':'line' ,'p1':540010 ,'p2':580010})
create_and_post({'t':'line' ,'p1':580010 ,'p2':580006})
create_and_post({'t':'line' ,'p1':580006 ,'p2':590007})
create_and_post({'t':'line' ,'p1':590007 ,'p2':590008})
create_and_post({'t':'line' ,'p1':590008 ,'p2':590009})
create_and_post({'t':'line' ,'p1':590009 ,'p2':590011})
create_and_post({'t':'line' ,'p1':590011 ,'p2':580010})
create_and_post({'t':'line' ,'p1':590011 ,'p2':570011})
create_and_post({'t':'line' ,'p1':570011 ,'p2':560011})
create_and_post({'t':'line' ,'p1':560011 ,'p2':550011})
create_and_post({'t':'line' ,'p1':550011 ,'p2':540010})
create_and_post({'t':'line' ,'p1':470003 ,'p2':480003})
create_and_post({'t':'line' ,'p1':450005 ,'p2':450006})
create_and_post({'t':'line' ,'p1':500005 ,'p2':500006})
create_and_post({'t':'line' ,'p1':470008 ,'p2':480008})
create_and_post({'t':'line' ,'p1':650003 ,'p2':660003})
create_and_post({'t':'line' ,'p1':630005 ,'p2':630006})
create_and_post({'t':'line' ,'p1':680005 ,'p2':680006})
create_and_post({'t':'line' ,'p1':650008 ,'p2':660008})
create_and_post({'t':'line' ,'p1':600000 ,'p2':620000})
create_and_post({'t':'line' ,'p1':620000 ,'p2':630000})
create_and_post({'t':'line' ,'p1':630000 ,'p2':640000})
create_and_post({'t':'line' ,'p1':600000 ,'p2':600002})
create_and_post({'t':'line' ,'p1':600002 ,'p2':600003})
create_and_post({'t':'line' ,'p1':600003 ,'p2':600004})
create_and_post({'t':'line' ,'p1':600004 ,'p2':640004})
create_and_post({'t':'line' ,'p1':640004 ,'p2':640000})
create_and_post({'t':'line' ,'p1':640000 ,'p2':650001})
create_and_post({'t':'line' ,'p1':650001 ,'p2':650002})
create_and_post({'t':'line' ,'p1':650002 ,'p2':650003})
create_and_post({'t':'line' ,'p1':650003 ,'p2':650005})
create_and_post({'t':'line' ,'p1':650005 ,'p2':640004})
create_and_post({'t':'line' ,'p1':650005 ,'p2':630005})
create_and_post({'t':'line' ,'p1':630005 ,'p2':620005})
create_and_post({'t':'line' ,'p1':620005 ,'p2':610005})
create_and_post({'t':'line' ,'p1':610005 ,'p2':600004})
create_and_post({'t':'line' ,'p1':660000 ,'p2':680000})
create_and_post({'t':'line' ,'p1':680000 ,'p2':690000})
create_and_post({'t':'line' ,'p1':690000 ,'p2':700000})
create_and_post({'t':'line' ,'p1':660000 ,'p2':660002})
create_and_post({'t':'line' ,'p1':660002 ,'p2':660003})
create_and_post({'t':'line' ,'p1':660003 ,'p2':660004})
create_and_post({'t':'line' ,'p1':660004 ,'p2':700004})
create_and_post({'t':'line' ,'p1':700004 ,'p2':700000})
create_and_post({'t':'line' ,'p1':700000 ,'p2':710001})
create_and_post({'t':'line' ,'p1':710001 ,'p2':710002})
create_and_post({'t':'line' ,'p1':710002 ,'p2':710003})
create_and_post({'t':'line' ,'p1':710003 ,'p2':710005})
create_and_post({'t':'line' ,'p1':710005 ,'p2':700004})
create_and_post({'t':'line' ,'p1':710005 ,'p2':690005})
create_and_post({'t':'line' ,'p1':690005 ,'p2':680005})
create_and_post({'t':'line' ,'p1':680005 ,'p2':670005})
create_and_post({'t':'line' ,'p1':670005 ,'p2':660004})
create_and_post({'t':'line' ,'p1':600006 ,'p2':620006})
create_and_post({'t':'line' ,'p1':620006 ,'p2':630006})
create_and_post({'t':'line' ,'p1':630006 ,'p2':640006})
create_and_post({'t':'line' ,'p1':600006 ,'p2':600008})
create_and_post({'t':'line' ,'p1':600008 ,'p2':600009})
create_and_post({'t':'line' ,'p1':600009 ,'p2':600010})
create_and_post({'t':'line' ,'p1':600010 ,'p2':640010})
create_and_post({'t':'line' ,'p1':640010 ,'p2':640006})
create_and_post({'t':'line' ,'p1':640006 ,'p2':650007})
create_and_post({'t':'line' ,'p1':650007 ,'p2':650008})
create_and_post({'t':'line' ,'p1':650008 ,'p2':650009})
create_and_post({'t':'line' ,'p1':650009 ,'p2':650011})
create_and_post({'t':'line' ,'p1':650011 ,'p2':640010})
create_and_post({'t':'line' ,'p1':650011 ,'p2':630011})
create_and_post({'t':'line' ,'p1':630011 ,'p2':620011})
create_and_post({'t':'line' ,'p1':620011 ,'p2':610011})
create_and_post({'t':'line' ,'p1':610011 ,'p2':600010})
create_and_post({'t':'line' ,'p1':660006 ,'p2':680006})
create_and_post({'t':'line' ,'p1':680006 ,'p2':690006})
create_and_post({'t':'line' ,'p1':690006 ,'p2':700006})
create_and_post({'t':'line' ,'p1':660006 ,'p2':660008})
create_and_post({'t':'line' ,'p1':660008 ,'p2':660009})
create_and_post({'t':'line' ,'p1':660009 ,'p2':660010})
create_and_post({'t':'line' ,'p1':660010 ,'p2':700010})
create_and_post({'t':'line' ,'p1':700010 ,'p2':700006})
create_and_post({'t':'line' ,'p1':700006 ,'p2':710007})
create_and_post({'t':'line' ,'p1':710007 ,'p2':710008})
create_and_post({'t':'line' ,'p1':710008 ,'p2':710009})
create_and_post({'t':'line' ,'p1':710009 ,'p2':710011})
create_and_post({'t':'line' ,'p1':710011 ,'p2':700010})
create_and_post({'t':'line' ,'p1':710011 ,'p2':690011})
create_and_post({'t':'line' ,'p1':690011 ,'p2':680011})
create_and_post({'t':'line' ,'p1':680011 ,'p2':670011})
create_and_post({'t':'line' ,'p1':670011 ,'p2':660010})
create_and_post({'t':'line' ,'p1':590003 ,'p2':600003})
create_and_post({'t':'line' ,'p1':570005 ,'p2':570006})
create_and_post({'t':'line' ,'p1':620005 ,'p2':620006})
create_and_post({'t':'line' ,'p1':590008 ,'p2':600008})
create_and_post({'t':'line' ,'p1':770003 ,'p2':780003})
create_and_post({'t':'line' ,'p1':750005 ,'p2':750006})
create_and_post({'t':'line' ,'p1':800005 ,'p2':800006})
create_and_post({'t':'line' ,'p1':770008 ,'p2':780008})
create_and_post({'t':'line' ,'p1':720000 ,'p2':740000})
create_and_post({'t':'line' ,'p1':740000 ,'p2':750000})
create_and_post({'t':'line' ,'p1':750000 ,'p2':760000})
create_and_post({'t':'line' ,'p1':720000 ,'p2':720002})
create_and_post({'t':'line' ,'p1':720002 ,'p2':720003})
create_and_post({'t':'line' ,'p1':720003 ,'p2':720004})
create_and_post({'t':'line' ,'p1':720004 ,'p2':760004})
create_and_post({'t':'line' ,'p1':760004 ,'p2':760000})
create_and_post({'t':'line' ,'p1':760000 ,'p2':770001})
create_and_post({'t':'line' ,'p1':770001 ,'p2':770002})
create_and_post({'t':'line' ,'p1':770002 ,'p2':770003})
create_and_post({'t':'line' ,'p1':770003 ,'p2':770005})
create_and_post({'t':'line' ,'p1':770005 ,'p2':760004})
create_and_post({'t':'line' ,'p1':770005 ,'p2':750005})
create_and_post({'t':'line' ,'p1':750005 ,'p2':740005})
create_and_post({'t':'line' ,'p1':740005 ,'p2':730005})
create_and_post({'t':'line' ,'p1':730005 ,'p2':720004})
create_and_post({'t':'line' ,'p1':780000 ,'p2':800000})
create_and_post({'t':'line' ,'p1':800000 ,'p2':810000})
create_and_post({'t':'line' ,'p1':810000 ,'p2':820000})
create_and_post({'t':'line' ,'p1':780000 ,'p2':780002})
create_and_post({'t':'line' ,'p1':780002 ,'p2':780003})
create_and_post({'t':'line' ,'p1':780003 ,'p2':780004})
create_and_post({'t':'line' ,'p1':780004 ,'p2':820004})
create_and_post({'t':'line' ,'p1':820004 ,'p2':820000})
create_and_post({'t':'line' ,'p1':820000 ,'p2':830001})
create_and_post({'t':'line' ,'p1':830001 ,'p2':830002})
create_and_post({'t':'line' ,'p1':830002 ,'p2':830003})
create_and_post({'t':'line' ,'p1':830003 ,'p2':830005})
create_and_post({'t':'line' ,'p1':830005 ,'p2':820004})
create_and_post({'t':'line' ,'p1':830005 ,'p2':810005})
create_and_post({'t':'line' ,'p1':810005 ,'p2':800005})
create_and_post({'t':'line' ,'p1':800005 ,'p2':790005})
create_and_post({'t':'line' ,'p1':790005 ,'p2':780004})
create_and_post({'t':'line' ,'p1':720006 ,'p2':740006})
create_and_post({'t':'line' ,'p1':740006 ,'p2':750006})
create_and_post({'t':'line' ,'p1':750006 ,'p2':760006})
create_and_post({'t':'line' ,'p1':720006 ,'p2':720008})
create_and_post({'t':'line' ,'p1':720008 ,'p2':720009})
create_and_post({'t':'line' ,'p1':720009 ,'p2':720010})
create_and_post({'t':'line' ,'p1':720010 ,'p2':760010})
create_and_post({'t':'line' ,'p1':760010 ,'p2':760006})
create_and_post({'t':'line' ,'p1':760006 ,'p2':770007})
create_and_post({'t':'line' ,'p1':770007 ,'p2':770008})
create_and_post({'t':'line' ,'p1':770008 ,'p2':770009})
create_and_post({'t':'line' ,'p1':770009 ,'p2':770011})
create_and_post({'t':'line' ,'p1':770011 ,'p2':760010})
create_and_post({'t':'line' ,'p1':770011 ,'p2':750011})
create_and_post({'t':'line' ,'p1':750011 ,'p2':740011})
create_and_post({'t':'line' ,'p1':740011 ,'p2':730011})
create_and_post({'t':'line' ,'p1':730011 ,'p2':720010})
create_and_post({'t':'line' ,'p1':780006 ,'p2':800006})
create_and_post({'t':'line' ,'p1':800006 ,'p2':810006})
create_and_post({'t':'line' ,'p1':810006 ,'p2':820006})
create_and_post({'t':'line' ,'p1':780006 ,'p2':780008})
create_and_post({'t':'line' ,'p1':780008 ,'p2':780009})
create_and_post({'t':'line' ,'p1':780009 ,'p2':780010})
create_and_post({'t':'line' ,'p1':780010 ,'p2':820010})
create_and_post({'t':'line' ,'p1':820010 ,'p2':820006})
create_and_post({'t':'line' ,'p1':820006 ,'p2':830007})
create_and_post({'t':'line' ,'p1':830007 ,'p2':830008})
create_and_post({'t':'line' ,'p1':830008 ,'p2':830009})
create_and_post({'t':'line' ,'p1':830009 ,'p2':830011})
create_and_post({'t':'line' ,'p1':830011 ,'p2':820010})
create_and_post({'t':'line' ,'p1':830011 ,'p2':810011})
create_and_post({'t':'line' ,'p1':810011 ,'p2':800011})
create_and_post({'t':'line' ,'p1':800011 ,'p2':790011})
create_and_post({'t':'line' ,'p1':790011 ,'p2':780010})
create_and_post({'t':'line' ,'p1':710003 ,'p2':720003})
create_and_post({'t':'line' ,'p1':690005 ,'p2':690006})
create_and_post({'t':'line' ,'p1':740005 ,'p2':740006})
create_and_post({'t':'line' ,'p1':710008 ,'p2':720008})
create_and_post({'t':'line' ,'p1':890003 ,'p2':900003})
create_and_post({'t':'line' ,'p1':870005 ,'p2':870006})
create_and_post({'t':'line' ,'p1':920005 ,'p2':920006})
create_and_post({'t':'line' ,'p1':890008 ,'p2':900008})
create_and_post({'t':'line' ,'p1':840000 ,'p2':860000})
create_and_post({'t':'line' ,'p1':860000 ,'p2':870000})
create_and_post({'t':'line' ,'p1':870000 ,'p2':880000})
create_and_post({'t':'line' ,'p1':840000 ,'p2':840002})
create_and_post({'t':'line' ,'p1':840002 ,'p2':840003})
create_and_post({'t':'line' ,'p1':840003 ,'p2':840004})
create_and_post({'t':'line' ,'p1':840004 ,'p2':880004})
create_and_post({'t':'line' ,'p1':880004 ,'p2':880000})
create_and_post({'t':'line' ,'p1':880000 ,'p2':890001})
create_and_post({'t':'line' ,'p1':890001 ,'p2':890002})
create_and_post({'t':'line' ,'p1':890002 ,'p2':890003})
create_and_post({'t':'line' ,'p1':890003 ,'p2':890005})
create_and_post({'t':'line' ,'p1':890005 ,'p2':880004})
create_and_post({'t':'line' ,'p1':890005 ,'p2':870005})
create_and_post({'t':'line' ,'p1':870005 ,'p2':860005})
create_and_post({'t':'line' ,'p1':860005 ,'p2':850005})
create_and_post({'t':'line' ,'p1':850005 ,'p2':840004})
create_and_post({'t':'line' ,'p1':900000 ,'p2':920000})
create_and_post({'t':'line' ,'p1':920000 ,'p2':930000})
create_and_post({'t':'line' ,'p1':930000 ,'p2':940000})
create_and_post({'t':'line' ,'p1':900000 ,'p2':900002})
create_and_post({'t':'line' ,'p1':900002 ,'p2':900003})
create_and_post({'t':'line' ,'p1':900003 ,'p2':900004})
create_and_post({'t':'line' ,'p1':900004 ,'p2':940004})
create_and_post({'t':'line' ,'p1':940004 ,'p2':940000})
create_and_post({'t':'line' ,'p1':940000 ,'p2':950001})
create_and_post({'t':'line' ,'p1':950001 ,'p2':950002})
create_and_post({'t':'line' ,'p1':950002 ,'p2':950003})
create_and_post({'t':'line' ,'p1':950003 ,'p2':950005})
create_and_post({'t':'line' ,'p1':950005 ,'p2':940004})
create_and_post({'t':'line' ,'p1':950005 ,'p2':930005})
create_and_post({'t':'line' ,'p1':930005 ,'p2':920005})
create_and_post({'t':'line' ,'p1':920005 ,'p2':910005})
create_and_post({'t':'line' ,'p1':910005 ,'p2':900004})
create_and_post({'t':'line' ,'p1':840006 ,'p2':860006})
create_and_post({'t':'line' ,'p1':860006 ,'p2':870006})
create_and_post({'t':'line' ,'p1':870006 ,'p2':880006})
create_and_post({'t':'line' ,'p1':840006 ,'p2':840008})
create_and_post({'t':'line' ,'p1':840008 ,'p2':840009})
create_and_post({'t':'line' ,'p1':840009 ,'p2':840010})
create_and_post({'t':'line' ,'p1':840010 ,'p2':880010})
create_and_post({'t':'line' ,'p1':880010 ,'p2':880006})
create_and_post({'t':'line' ,'p1':880006 ,'p2':890007})
create_and_post({'t':'line' ,'p1':890007 ,'p2':890008})
create_and_post({'t':'line' ,'p1':890008 ,'p2':890009})
create_and_post({'t':'line' ,'p1':890009 ,'p2':890011})
create_and_post({'t':'line' ,'p1':890011 ,'p2':880010})
create_and_post({'t':'line' ,'p1':890011 ,'p2':870011})
create_and_post({'t':'line' ,'p1':870011 ,'p2':860011})
create_and_post({'t':'line' ,'p1':860011 ,'p2':850011})
create_and_post({'t':'line' ,'p1':850011 ,'p2':840010})
create_and_post({'t':'line' ,'p1':900006 ,'p2':920006})
create_and_post({'t':'line' ,'p1':920006 ,'p2':930006})
create_and_post({'t':'line' ,'p1':930006 ,'p2':940006})
create_and_post({'t':'line' ,'p1':900006 ,'p2':900008})
create_and_post({'t':'line' ,'p1':900008 ,'p2':900009})
create_and_post({'t':'line' ,'p1':900009 ,'p2':900010})
create_and_post({'t':'line' ,'p1':900010 ,'p2':940010})
create_and_post({'t':'line' ,'p1':940010 ,'p2':940006})
create_and_post({'t':'line' ,'p1':940006 ,'p2':950007})
create_and_post({'t':'line' ,'p1':950007 ,'p2':950008})
create_and_post({'t':'line' ,'p1':950008 ,'p2':950009})
create_and_post({'t':'line' ,'p1':950009 ,'p2':950011})
create_and_post({'t':'line' ,'p1':950011 ,'p2':940010})
create_and_post({'t':'line' ,'p1':950011 ,'p2':930011})
create_and_post({'t':'line' ,'p1':930011 ,'p2':920011})
create_and_post({'t':'line' ,'p1':920011 ,'p2':910011})
create_and_post({'t':'line' ,'p1':910011 ,'p2':900010})
create_and_post({'t':'line' ,'p1':830003 ,'p2':840003})
create_and_post({'t':'line' ,'p1':810005 ,'p2':810006})
create_and_post({'t':'line' ,'p1':860005 ,'p2':860006})
create_and_post({'t':'line' ,'p1':830008 ,'p2':840008})
create_and_post({'t':'line' ,'p1':1010003 ,'p2':1020003})
create_and_post({'t':'line' ,'p1':990005 ,'p2':990006})
create_and_post({'t':'line' ,'p1':1040005 ,'p2':1040006})
create_and_post({'t':'line' ,'p1':1010008 ,'p2':1020008})
create_and_post({'t':'line' ,'p1':960000 ,'p2':980000})
create_and_post({'t':'line' ,'p1':980000 ,'p2':990000})
create_and_post({'t':'line' ,'p1':990000 ,'p2':1000000})
create_and_post({'t':'line' ,'p1':960000 ,'p2':960002})
create_and_post({'t':'line' ,'p1':960002 ,'p2':960003})
create_and_post({'t':'line' ,'p1':960003 ,'p2':960004})
create_and_post({'t':'line' ,'p1':960004 ,'p2':1000004})
create_and_post({'t':'line' ,'p1':1000004 ,'p2':1000000})
create_and_post({'t':'line' ,'p1':1000000 ,'p2':1010001})
create_and_post({'t':'line' ,'p1':1010001 ,'p2':1010002})
create_and_post({'t':'line' ,'p1':1010002 ,'p2':1010003})
create_and_post({'t':'line' ,'p1':1010003 ,'p2':1010005})
create_and_post({'t':'line' ,'p1':1010005 ,'p2':1000004})
create_and_post({'t':'line' ,'p1':1010005 ,'p2':990005})
create_and_post({'t':'line' ,'p1':990005 ,'p2':980005})
create_and_post({'t':'line' ,'p1':980005 ,'p2':970005})
create_and_post({'t':'line' ,'p1':970005 ,'p2':960004})
create_and_post({'t':'line' ,'p1':1020000 ,'p2':1040000})
create_and_post({'t':'line' ,'p1':1040000 ,'p2':1050000})
create_and_post({'t':'line' ,'p1':1050000 ,'p2':1060000})
create_and_post({'t':'line' ,'p1':1020000 ,'p2':1020002})
create_and_post({'t':'line' ,'p1':1020002 ,'p2':1020003})
create_and_post({'t':'line' ,'p1':1020003 ,'p2':1020004})
create_and_post({'t':'line' ,'p1':1020004 ,'p2':1060004})
create_and_post({'t':'line' ,'p1':1060004 ,'p2':1060000})
create_and_post({'t':'line' ,'p1':1060000 ,'p2':1070001})
create_and_post({'t':'line' ,'p1':1070001 ,'p2':1070002})
create_and_post({'t':'line' ,'p1':1070002 ,'p2':1070003})
create_and_post({'t':'line' ,'p1':1070003 ,'p2':1070005})
create_and_post({'t':'line' ,'p1':1070005 ,'p2':1060004})
create_and_post({'t':'line' ,'p1':1070005 ,'p2':1050005})
create_and_post({'t':'line' ,'p1':1050005 ,'p2':1040005})
create_and_post({'t':'line' ,'p1':1040005 ,'p2':1030005})
create_and_post({'t':'line' ,'p1':1030005 ,'p2':1020004})
create_and_post({'t':'line' ,'p1':960006 ,'p2':980006})
create_and_post({'t':'line' ,'p1':980006 ,'p2':990006})
create_and_post({'t':'line' ,'p1':990006 ,'p2':1000006})
create_and_post({'t':'line' ,'p1':960006 ,'p2':960008})
create_and_post({'t':'line' ,'p1':960008 ,'p2':960009})
create_and_post({'t':'line' ,'p1':960009 ,'p2':960010})
create_and_post({'t':'line' ,'p1':960010 ,'p2':1000010})
create_and_post({'t':'line' ,'p1':1000010 ,'p2':1000006})
create_and_post({'t':'line' ,'p1':1000006 ,'p2':1010007})
create_and_post({'t':'line' ,'p1':1010007 ,'p2':1010008})
create_and_post({'t':'line' ,'p1':1010008 ,'p2':1010009})
create_and_post({'t':'line' ,'p1':1010009 ,'p2':1010011})
create_and_post({'t':'line' ,'p1':1010011 ,'p2':1000010})
create_and_post({'t':'line' ,'p1':1010011 ,'p2':990011})
create_and_post({'t':'line' ,'p1':990011 ,'p2':980011})
create_and_post({'t':'line' ,'p1':980011 ,'p2':970011})
create_and_post({'t':'line' ,'p1':970011 ,'p2':960010})
create_and_post({'t':'line' ,'p1':1020006 ,'p2':1040006})
create_and_post({'t':'line' ,'p1':1040006 ,'p2':1050006})
create_and_post({'t':'line' ,'p1':1050006 ,'p2':1060006})
create_and_post({'t':'line' ,'p1':1020006 ,'p2':1020008})
create_and_post({'t':'line' ,'p1':1020008 ,'p2':1020009})
create_and_post({'t':'line' ,'p1':1020009 ,'p2':1020010})
create_and_post({'t':'line' ,'p1':1020010 ,'p2':1060010})
create_and_post({'t':'line' ,'p1':1060010 ,'p2':1060006})
create_and_post({'t':'line' ,'p1':1060006 ,'p2':1070007})
create_and_post({'t':'line' ,'p1':1070007 ,'p2':1070008})
create_and_post({'t':'line' ,'p1':1070008 ,'p2':1070009})
create_and_post({'t':'line' ,'p1':1070009 ,'p2':1070011})
create_and_post({'t':'line' ,'p1':1070011 ,'p2':1060010})
create_and_post({'t':'line' ,'p1':1070011 ,'p2':1050011})
create_and_post({'t':'line' ,'p1':1050011 ,'p2':1040011})
create_and_post({'t':'line' ,'p1':1040011 ,'p2':1030011})
create_and_post({'t':'line' ,'p1':1030011 ,'p2':1020010})
create_and_post({'t':'line' ,'p1':950003 ,'p2':960003})
create_and_post({'t':'line' ,'p1':930005 ,'p2':930006})
create_and_post({'t':'line' ,'p1':980005 ,'p2':980006})
create_and_post({'t':'line' ,'p1':950008 ,'p2':960008})
create_and_post({'t':'line' ,'p1':50003 ,'p2':60003})
create_and_post({'t':'line' ,'p1':30005 ,'p2':30006})
create_and_post({'t':'line' ,'p1':80005 ,'p2':80006})
create_and_post({'t':'line' ,'p1':50008 ,'p2':60008})
create_and_post({'t':'line' ,'p1':0 ,'p2':20000})
create_and_post({'t':'line' ,'p1':20000 ,'p2':30000})
create_and_post({'t':'line' ,'p1':30000 ,'p2':40000})
create_and_post({'t':'line' ,'p1':0 ,'p2':2})
create_and_post({'t':'line' ,'p1':2 ,'p2':3})
create_and_post({'t':'line' ,'p1':3 ,'p2':4})
create_and_post({'t':'line' ,'p1':4 ,'p2':40004})
create_and_post({'t':'line' ,'p1':40004 ,'p2':40000})
create_and_post({'t':'line' ,'p1':40000 ,'p2':50001})
create_and_post({'t':'line' ,'p1':50001 ,'p2':50002})
create_and_post({'t':'line' ,'p1':50002 ,'p2':50003})
create_and_post({'t':'line' ,'p1':50003 ,'p2':50005})
create_and_post({'t':'line' ,'p1':50005 ,'p2':40004})
create_and_post({'t':'line' ,'p1':50005 ,'p2':30005})
create_and_post({'t':'line' ,'p1':30005 ,'p2':20005})
create_and_post({'t':'line' ,'p1':20005 ,'p2':10005})
create_and_post({'t':'line' ,'p1':10005 ,'p2':4})
create_and_post({'t':'line' ,'p1':60000 ,'p2':80000})
create_and_post({'t':'line' ,'p1':80000 ,'p2':90000})
create_and_post({'t':'line' ,'p1':90000 ,'p2':100000})
create_and_post({'t':'line' ,'p1':60000 ,'p2':60002})
create_and_post({'t':'line' ,'p1':60002 ,'p2':60003})
create_and_post({'t':'line' ,'p1':60003 ,'p2':60004})
create_and_post({'t':'line' ,'p1':60004 ,'p2':100004})
create_and_post({'t':'line' ,'p1':100004 ,'p2':100000})
create_and_post({'t':'line' ,'p1':100000 ,'p2':110001})
create_and_post({'t':'line' ,'p1':110001 ,'p2':110002})
create_and_post({'t':'line' ,'p1':110002 ,'p2':110003})
create_and_post({'t':'line' ,'p1':110003 ,'p2':110005})
create_and_post({'t':'line' ,'p1':110005 ,'p2':100004})
create_and_post({'t':'line' ,'p1':110005 ,'p2':90005})
create_and_post({'t':'line' ,'p1':90005 ,'p2':80005})
create_and_post({'t':'line' ,'p1':80005 ,'p2':70005})
create_and_post({'t':'line' ,'p1':70005 ,'p2':60004})
create_and_post({'t':'line' ,'p1':6 ,'p2':20006})
create_and_post({'t':'line' ,'p1':20006 ,'p2':30006})
create_and_post({'t':'line' ,'p1':30006 ,'p2':40006})
create_and_post({'t':'line' ,'p1':6 ,'p2':8})
create_and_post({'t':'line' ,'p1':8 ,'p2':9})
create_and_post({'t':'line' ,'p1':9 ,'p2':10})
create_and_post({'t':'line' ,'p1':10 ,'p2':40010})
create_and_post({'t':'line' ,'p1':40010 ,'p2':40006})
create_and_post({'t':'line' ,'p1':40006 ,'p2':50007})
create_and_post({'t':'line' ,'p1':50007 ,'p2':50008})
create_and_post({'t':'line' ,'p1':50008 ,'p2':50009})
create_and_post({'t':'line' ,'p1':50009 ,'p2':50011})
create_and_post({'t':'line' ,'p1':50011 ,'p2':40010})
create_and_post({'t':'line' ,'p1':50011 ,'p2':30011})
create_and_post({'t':'line' ,'p1':30011 ,'p2':20011})
create_and_post({'t':'line' ,'p1':20011 ,'p2':10011})
create_and_post({'t':'line' ,'p1':10011 ,'p2':10})
create_and_post({'t':'line' ,'p1':60006 ,'p2':80006})
create_and_post({'t':'line' ,'p1':80006 ,'p2':90006})
create_and_post({'t':'line' ,'p1':90006 ,'p2':100006})
create_and_post({'t':'line' ,'p1':60006 ,'p2':60008})
create_and_post({'t':'line' ,'p1':60008 ,'p2':60009})
create_and_post({'t':'line' ,'p1':60009 ,'p2':60010})
create_and_post({'t':'line' ,'p1':60010 ,'p2':100010})
create_and_post({'t':'line' ,'p1':100010 ,'p2':100006})
create_and_post({'t':'line' ,'p1':100006 ,'p2':110007})
create_and_post({'t':'line' ,'p1':110007 ,'p2':110008})
create_and_post({'t':'line' ,'p1':110008 ,'p2':110009})
create_and_post({'t':'line' ,'p1':110009 ,'p2':110011})
create_and_post({'t':'line' ,'p1':110011 ,'p2':100010})
create_and_post({'t':'line' ,'p1':110011 ,'p2':90011})
create_and_post({'t':'line' ,'p1':90011 ,'p2':80011})
create_and_post({'t':'line' ,'p1':80011 ,'p2':70011})
create_and_post({'t':'line' ,'p1':70011 ,'p2':60010})
create_and_post({'t':'line' ,'p1':170003 ,'p2':180003})
create_and_post({'t':'line' ,'p1':150005 ,'p2':150006})
create_and_post({'t':'line' ,'p1':200005 ,'p2':200006})
create_and_post({'t':'line' ,'p1':170008 ,'p2':180008})
create_and_post({'t':'line' ,'p1':120000 ,'p2':140000})
create_and_post({'t':'line' ,'p1':140000 ,'p2':150000})
create_and_post({'t':'line' ,'p1':150000 ,'p2':160000})
create_and_post({'t':'line' ,'p1':120000 ,'p2':120002})
create_and_post({'t':'line' ,'p1':120002 ,'p2':120003})
create_and_post({'t':'line' ,'p1':120003 ,'p2':120004})
create_and_post({'t':'line' ,'p1':120004 ,'p2':160004})
create_and_post({'t':'line' ,'p1':160004 ,'p2':160000})
create_and_post({'t':'line' ,'p1':160000 ,'p2':170001})
create_and_post({'t':'line' ,'p1':170001 ,'p2':170002})
create_and_post({'t':'line' ,'p1':170002 ,'p2':170003})
create_and_post({'t':'line' ,'p1':170003 ,'p2':170005})
create_and_post({'t':'line' ,'p1':170005 ,'p2':160004})
create_and_post({'t':'line' ,'p1':170005 ,'p2':150005})
create_and_post({'t':'line' ,'p1':150005 ,'p2':140005})
create_and_post({'t':'line' ,'p1':140005 ,'p2':130005})
create_and_post({'t':'line' ,'p1':130005 ,'p2':120004})
create_and_post({'t':'line' ,'p1':180000 ,'p2':200000})
create_and_post({'t':'line' ,'p1':200000 ,'p2':210000})
create_and_post({'t':'line' ,'p1':210000 ,'p2':220000})
create_and_post({'t':'line' ,'p1':180000 ,'p2':180002})
create_and_post({'t':'line' ,'p1':180002 ,'p2':180003})
create_and_post({'t':'line' ,'p1':180003 ,'p2':180004})
create_and_post({'t':'line' ,'p1':180004 ,'p2':220004})
create_and_post({'t':'line' ,'p1':220004 ,'p2':220000})
create_and_post({'t':'line' ,'p1':220000 ,'p2':230001})
create_and_post({'t':'line' ,'p1':230001 ,'p2':230002})
create_and_post({'t':'line' ,'p1':230002 ,'p2':230003})
create_and_post({'t':'line' ,'p1':230003 ,'p2':230005})
create_and_post({'t':'line' ,'p1':230005 ,'p2':220004})
create_and_post({'t':'line' ,'p1':230005 ,'p2':210005})
create_and_post({'t':'line' ,'p1':210005 ,'p2':200005})
create_and_post({'t':'line' ,'p1':200005 ,'p2':190005})
create_and_post({'t':'line' ,'p1':190005 ,'p2':180004})
create_and_post({'t':'line' ,'p1':120006 ,'p2':140006})
create_and_post({'t':'line' ,'p1':140006 ,'p2':150006})
create_and_post({'t':'line' ,'p1':150006 ,'p2':160006})
create_and_post({'t':'line' ,'p1':120006 ,'p2':120008})
create_and_post({'t':'line' ,'p1':120008 ,'p2':120009})
create_and_post({'t':'line' ,'p1':120009 ,'p2':120010})
create_and_post({'t':'line' ,'p1':120010 ,'p2':160010})
create_and_post({'t':'line' ,'p1':160010 ,'p2':160006})
create_and_post({'t':'line' ,'p1':160006 ,'p2':170007})
create_and_post({'t':'line' ,'p1':170007 ,'p2':170008})
create_and_post({'t':'line' ,'p1':170008 ,'p2':170009})
create_and_post({'t':'line' ,'p1':170009 ,'p2':170011})
create_and_post({'t':'line' ,'p1':170011 ,'p2':160010})
create_and_post({'t':'line' ,'p1':170011 ,'p2':150011})
create_and_post({'t':'line' ,'p1':150011 ,'p2':140011})
create_and_post({'t':'line' ,'p1':140011 ,'p2':130011})
create_and_post({'t':'line' ,'p1':130011 ,'p2':120010})
create_and_post({'t':'line' ,'p1':180006 ,'p2':200006})
create_and_post({'t':'line' ,'p1':200006 ,'p2':210006})
create_and_post({'t':'line' ,'p1':210006 ,'p2':220006})
create_and_post({'t':'line' ,'p1':180006 ,'p2':180008})
create_and_post({'t':'line' ,'p1':180008 ,'p2':180009})
create_and_post({'t':'line' ,'p1':180009 ,'p2':180010})
create_and_post({'t':'line' ,'p1':180010 ,'p2':220010})
create_and_post({'t':'line' ,'p1':220010 ,'p2':220006})
create_and_post({'t':'line' ,'p1':220006 ,'p2':230007})
create_and_post({'t':'line' ,'p1':230007 ,'p2':230008})
create_and_post({'t':'line' ,'p1':230008 ,'p2':230009})
create_and_post({'t':'line' ,'p1':230009 ,'p2':230011})
create_and_post({'t':'line' ,'p1':230011 ,'p2':220010})
create_and_post({'t':'line' ,'p1':230011 ,'p2':210011})
create_and_post({'t':'line' ,'p1':210011 ,'p2':200011})
create_and_post({'t':'line' ,'p1':200011 ,'p2':190011})
create_and_post({'t':'line' ,'p1':190011 ,'p2':180010})
create_and_post({'t':'line' ,'p1':110003 ,'p2':120003})
create_and_post({'t':'line' ,'p1':90005 ,'p2':90006})
create_and_post({'t':'line' ,'p1':140005 ,'p2':140006})
create_and_post({'t':'line' ,'p1':110008 ,'p2':120008})
create_and_post({'t':'line' ,'p1':290003 ,'p2':300003})
create_and_post({'t':'line' ,'p1':270005 ,'p2':270006})
create_and_post({'t':'line' ,'p1':320005 ,'p2':320006})
create_and_post({'t':'line' ,'p1':290008 ,'p2':300008})
create_and_post({'t':'line' ,'p1':240000 ,'p2':260000})
create_and_post({'t':'line' ,'p1':260000 ,'p2':270000})
create_and_post({'t':'line' ,'p1':270000 ,'p2':280000})
create_and_post({'t':'line' ,'p1':240000 ,'p2':240002})
create_and_post({'t':'line' ,'p1':240002 ,'p2':240003})
create_and_post({'t':'line' ,'p1':240003 ,'p2':240004})
create_and_post({'t':'line' ,'p1':240004 ,'p2':280004})
create_and_post({'t':'line' ,'p1':280004 ,'p2':280000})
create_and_post({'t':'line' ,'p1':280000 ,'p2':290001})
create_and_post({'t':'line' ,'p1':290001 ,'p2':290002})
create_and_post({'t':'line' ,'p1':290002 ,'p2':290003})
create_and_post({'t':'line' ,'p1':290003 ,'p2':290005})
create_and_post({'t':'line' ,'p1':290005 ,'p2':280004})
create_and_post({'t':'line' ,'p1':290005 ,'p2':270005})
create_and_post({'t':'line' ,'p1':270005 ,'p2':260005})
create_and_post({'t':'line' ,'p1':260005 ,'p2':250005})
create_and_post({'t':'line' ,'p1':250005 ,'p2':240004})
create_and_post({'t':'line' ,'p1':300000 ,'p2':320000})
create_and_post({'t':'line' ,'p1':320000 ,'p2':330000})
create_and_post({'t':'line' ,'p1':330000 ,'p2':340000})
create_and_post({'t':'line' ,'p1':300000 ,'p2':300002})
create_and_post({'t':'line' ,'p1':300002 ,'p2':300003})
create_and_post({'t':'line' ,'p1':300003 ,'p2':300004})
create_and_post({'t':'line' ,'p1':300004 ,'p2':340004})
create_and_post({'t':'line' ,'p1':340004 ,'p2':340000})
create_and_post({'t':'line' ,'p1':340000 ,'p2':350001})
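# A minimal generator sketch (not part of the original script) for the block of
# calls above, assuming the point ids encode grid coordinates as
# id = x * 10000 + y and that create_and_post (assumed to be defined earlier in
# this script) posts one 'line' primitive per call. The segment list is
# transcribed from one of the outlines drawn above; the short connector
# segments between neighbouring outlines are not covered, and the offset ranges
# below are illustrative only.
# _SEGMENTS = [((0, 0), (2, 0)), ((2, 0), (3, 0)), ((3, 0), (4, 0)),
#              ((0, 0), (0, 2)), ((0, 2), (0, 3)), ((0, 3), (0, 4)),
#              ((0, 4), (4, 4)), ((4, 4), (4, 0)), ((4, 0), (5, 1)),
#              ((5, 1), (5, 2)), ((5, 2), (5, 3)), ((5, 3), (5, 5)),
#              ((5, 5), (4, 4)), ((5, 5), (3, 5)), ((3, 5), (2, 5)),
#              ((2, 5), (1, 5)), ((1, 5), (0, 4))]
#
# def post_outline(x0, y0):
#     pid = lambda x, y: (x0 + x) * 10000 + (y0 + y)
#     for (xa, ya), (xb, yb) in _SEGMENTS:
#         create_and_post({'t': 'line', 'p1': pid(xa, ya), 'p2': pid(xb, yb)})
#
# for x0 in range(0, 36, 6):   # example column offsets
#     for y0 in (0, 6):        # example row offsets
#         post_outline(x0, y0)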
import json
import logging
import os
import re
from collections import namedtuple
from copy import deepcopy
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import spacy
from scirex_utilities.analyse_pwc_entity_results import *
from scirex_utilities.entity_utils import *
from spacy.tokens import Doc
from tqdm import tqdm
tqdm.pandas()
LabelSpan = namedtuple("Span", ["start", "end", "token_start", "token_end", "entity", "links", "modified"])
logging.basicConfig(level=logging.INFO)
class WhitespaceTokenizer(object):
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split()
# All tokens 'own' a subsequent space character in this tokenizer
spaces = [True] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
nlp = spacy.load("en")
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
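# Illustrative check of the whitespace-only tokenization configured above
# (the sentence is a made-up example):
# doc = nlp("Conv Net - 5 layers")
# assert [t.text for t in doc] == ['Conv', 'Net', '-', '5', 'layers']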
def process_folder(folder: str) -> Tuple[dict, str]:
span_labels = {}
map_T_to_span = {}
if not os.path.isdir(folder) or "document.txt" not in os.listdir(folder):
print(folder, "has no document.txt")
return None
doc_text = open(os.path.join(folder, "document.txt")).read()
ann_file = open(os.path.join(folder, "document.ann")).read().strip()
annotations = [x.split("\t", 1) for x in ann_file.split("\n")]
annotations = sorted(annotations, key=lambda x: 0 if x[0][0] == "T" else 1)
for ann_type, ann in annotations:
if ann_type[0] == "T":
ann, ann_text = ann.split("\t")
if ";" in ann:
continue
else:
enttype, span_start, span_end = ann.split()
span_start, span_end = int(span_start), int(span_end)
if (span_start, span_end) in span_labels:
assert "Span already present"
else:
span_labels[(span_start, span_end)] = {"E": enttype, "A": set(), "T": ann_text}
map_T_to_span[ann_type] = (span_start, span_end)
if ann_type[0] == "A":
ann, ann_T = ann.split()
if ann_T in map_T_to_span:
span_labels[map_T_to_span[ann_T]]["A"].add(ann)
else:
print("Attribute before Trigger")
return span_labels, doc_text
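# Illustrative .ann content this parser expects (BRAT standoff format; the
# values are made up). Fields are tab-separated:
#   T1<TAB>Method 12 29<TAB>convolutional net
#   A1<TAB>Important T1
# which would yield
#   span_labels[(12, 29)] == {"E": "Method", "A": {"Important"}, "T": "convolutional net"}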
def get_all_document_annotations(brat_folder: str) -> Dict[str, Tuple[dict, str]]:
map_id_to_ann = {}
for f in tqdm(os.listdir(brat_folder)):
try:
map_id_to_ann[f] = process_folder(os.path.join(brat_folder, f))
except Exception as e:
print(f)
return map_id_to_ann
def process_back_to_dataframe(span_labels: Dict[Tuple[int, int], dict], doc_text: str):
sentences = doc_text.split("\n ")
assert sentences[-1] == ""
sentences = [x + "\n " for x in sentences[:-1]]
sentence_limits = np.cumsum([len(x) for x in sentences])
sentence_limits = list(zip([0] + list(sentence_limits)[:-1], sentence_limits))
for s, e in sentence_limits:
assert doc_text[e - 2 : e] == "\n "
assert doc_text[s] != " "
span_labels = list(map(lambda x: [list(x[0]), x[1]], sorted(span_labels.items(), key=lambda x: x[0][0])))
sl_ix = 0
map_sentence_limits_to_spans = {}
for ss, se in sentence_limits:
map_sentence_limits_to_spans[(ss, se)] = []
while sl_ix < len(span_labels) and span_labels[sl_ix][0][0] >= ss and span_labels[sl_ix][0][1] <= se:
map_sentence_limits_to_spans[(ss, se)].append(span_labels[sl_ix])
sl_ix += 1
spans_in_l = 0
for k, v in map_sentence_limits_to_spans.items():
for span, _ in v:
assert k[0] <= span[0] and k[1] >= span[1]
spans_in_l += 1
assert span[1] < k[1] - 1
assert spans_in_l == len(span_labels)
for k, v in map_sentence_limits_to_spans.items():
for span, _ in v:
span[0] -= k[0]
span[1] -= k[0]
df = []
for sent_id, ((ss, se), st) in enumerate(zip(sentence_limits, sentences)):
for span, d in map_sentence_limits_to_spans[(ss, se)]:
assert st[-2:] == "\n ", st[-2:]
assert span[1] < len(st) - 2
assert st[span[0] : span[1]] == d["T"] and len(d["T"]) > 0, (st[span[0] : span[1]], d["T"])
df.append({"sentence": st, "spans": map_sentence_limits_to_spans[(ss, se)], "sentence_id": sent_id})
assert df[4]["sentence"].strip() == "", breakpoint()
df = df[5:]
df = pd.DataFrame(df)
return df
def get_dataframe_from_folder(brat_folder):
logging.info("Generating DataFrame ...")
map_changes = get_all_document_annotations(brat_folder)
logging.info("Done generating DataFrame")
doc_df = []
for k in tqdm(map_changes):
if map_changes[k] is None:
continue
df = process_back_to_dataframe(*map_changes[k])
df["doc_id"] = k
doc_df.append(df)
doc_df = pd.concat(doc_df)
return doc_df
def overlap(span_1, span_2):
if span_1[0] >= span_2[1] or span_2[0] >= span_1[1]:
return False
return True
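# Spans are treated as half-open on the right, so touching spans do not overlap:
#   overlap((0, 5), (5, 10)) -> False
#   overlap((0, 5), (4, 10)) -> True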
def process_cluster(cluster):
stats = {
"new_spans": len([x for x in cluster if "pre" not in x[1]]),
"old_spans": len([x for x in cluster if "pre" in x[1]]),
"type_change": 0,
"change_attributes": 0,
}
old_spans = [x for x in cluster if "pre" in x[1]]
new_spans = [x for x in cluster if "pre" not in x[1]]
old_spans_modified, old_spans_unmodified = [], []
for span, info in old_spans:
if [info[k] for k in ["E", "T", "A"]] == [info["pre"][k] for k in ["E", "T", "A"]]:
del info["pre"]
if any(overlap(span, n_span) for n_span, _ in new_spans):
continue
old_spans_unmodified.append((span, info))
else:
del info["pre"]
if any(overlap(span, n_span) for n_span, _ in new_spans):
continue
old_spans_modified.append((span, info))
assert all((si == sj or not overlap(si[0], sj[0])) for si in new_spans for sj in new_spans), breakpoint()
assert len(old_spans_unmodified) == 0 or len(old_spans_modified) == 0, breakpoint()
assert all(
(not overlap(ospan, nspan)) for ospan, _ in old_spans_modified for nspan, _ in new_spans
), breakpoint()
assert all(
(not overlap(ospan, nspan)) for ospan, _ in old_spans_unmodified for nspan, _ in new_spans
), breakpoint()
if len(old_spans_modified + old_spans_unmodified) > 0 and len(new_spans) > 0:
breakpoint()
new_spans = [
LabelSpan(
start=x[0][0],
end=x[0][1],
entity=x[1]["E"],
links=x[1]["A"],
token_start=None,
token_end=None,
modified=True,
)._asdict()
for x in new_spans + old_spans_modified
]
new_spans += [
LabelSpan(
start=x[0][0],
end=x[0][1],
entity=x[1]["E"],
links=x[1]["A"],
token_start=None,
token_end=None,
modified=False,
)._asdict()
for x in old_spans_unmodified
]
stats["spans_kept"] = len(new_spans)
return new_spans, stats
# Case 1: the pre entity has labels and the post entity doesn't -> copy labels, delete the pre entity
# Case 2: the pre entity has labels and the post entity also has labels -> don't copy labels, delete the pre entity
# Case 3: if the post entity has a different type than the pre entity, remove the pre entity
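# Illustrative example (hypothetical spans): if the old pass marked (10, 18) as
# a Method and the new pass left it alone, case 1 keeps it with modified=False;
# if the new pass re-annotated an overlapping span (12, 20), the old span is
# dropped and only the new one survives with modified=True.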
def normalize_spans(row):
span_list_1, span_list_2 = row["spans_old"], row["spans_new"]
map_1_span_to_ix = {tuple(k): v for k, v in span_list_1}
if len(span_list_2) == 0:
return [], None
spans = [tuple(x[0]) for x in span_list_2]
if len(spans) != len(set(spans)):
assert "Duplicate spans", span_list_2
span_list_2 = sorted(span_list_2, key=lambda x: x[0])
stats = []
clusters = []
curr_cluster = []
cstart, cend = -1, -1
for (start, end), span_info in span_list_2:
cspan = ((start, end), span_info)
if (start, end) in map_1_span_to_ix:
span_info["pre"] = map_1_span_to_ix[(start, end)]
if cstart == -1: # (Start First Cluster)
curr_cluster.append(cspan)
cstart, cend = start, end
elif start < cend: # Append to current cluster
curr_cluster.append(cspan)
cend = max(cend, end)
else: # Start new cluster
curr_cluster, cluster_stats = process_cluster(curr_cluster)
stats.append(cluster_stats)
clusters.append(curr_cluster)
curr_cluster = [cspan]
cstart, cend = start, end
curr_cluster, cluster_stats = process_cluster(curr_cluster)
stats.append(cluster_stats)
clusters.append(curr_cluster)
clusters = sorted([z for x in clusters for z in x], key=lambda x: (x["start"], x["end"]))
for i in range(len(clusters) - 1):
if clusters[i]["end"] > clusters[i + 1]["start"]:
breakpoint()
stats_reduced = {}
for s in stats:
for k, v in s.items():
if k not in stats_reduced:
stats_reduced[k] = v
else:
stats_reduced[k] += v
return clusters, stats_reduced
def add_token_index(row):
if len(row["cluster"]) == 0:
return []
sentence = row["sentence_old"]
words = row["words"]
word_indices = row["word_indices"]
sentence_start = row["sentence_start"]
starts, ends = list(zip(*word_indices))
for i, (start, end) in enumerate(zip(starts, ends)):
assert sentence[start:end] == words[i], breakpoint()
new_cluster = []
cluster = row["cluster"]
for i, span in enumerate(cluster):
assert "start" in span, breakpoint()
assert "end" in span, breakpoint()
if not (span["start"] in starts):
if sentence[span["start"]].strip() == "":
span["start"] += 1
else:
span["start"] = min(
starts, key=lambda x: abs(x - span["start"]) if x < span["start"] else float("inf")
)
if not (span["end"] in ends):
if sentence[span["end"] - 1].strip() == "":
span["end"] -= 1
else:
span["end"] = min(
ends, key=lambda x: abs(x - span["end"]) if x > span["end"] else float("inf")
)
span["token_start"] = starts.index(span["start"]) + sentence_start - len(words)
span["token_end"] = ends.index(span["end"]) + 1 + sentence_start - len(words)
for cleaned_span in new_cluster:
if overlap(
(span["token_start"], span["token_end"]),
(cleaned_span["token_start"], cleaned_span["token_end"]),
):
print(row["doc_id"])
print(" ".join(row["words"]))
print("=" * 20)
new_cluster.append(span)
return new_cluster
def generate_token_and_indices(sentence):
words = sorted(
[(m.group(0), (m.start(), m.end())) for m in re.finditer(r"[^\s\+\-/\(\)&\[\],]+", sentence)]
+ [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"[\+\-/\(\)&\[\],]+", sentence)]
+ [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"\s+", sentence)],
key=lambda x: x[1],
)
if len(words) == 0 or sentence.strip() == "":
return [], []
try:
words, indices = list(zip(*[(t, i) for t, i in words if t.strip() != ""]))
except:
breakpoint()
return words, indices
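# Illustrative example: generate_token_and_indices("ResNet-50 (ours)") yields
# the tokens ('ResNet', '-', '50', '(', 'ours', ')') together with their
# character offsets, because runs of +-/()&[], are split out from word runs and
# pure-whitespace matches are filtered away.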
def compare_brat_annotations(ann_old_df, ann_new_df):
df_merged = ann_old_df.merge(ann_new_df, on=["doc_id", "sentence_id"], suffixes=("_old", "_new"))
logging.info("Applying Normalize Spans ...")
output = df_merged.progress_apply(normalize_spans, axis=1)
df_merged["cluster"], df_merged["stats"] = list(zip(*output))
df_merged = df_merged.sort_values(["doc_id", "sentence_id"]).reset_index(drop=True)
logging.info("Applying Add Token Index ...")
df_merged["words"], df_merged["word_indices"] = list(
zip(*df_merged["sentence_old"].progress_apply(generate_token_and_indices))
)
df_merged["num_words"] = df_merged["words"].progress_apply(len)
df_merged["sentence_start"] = df_merged.groupby("doc_id")["num_words"].cumsum()
df_merged["entities"] = df_merged.apply(add_token_index, axis=1)
df_merged = (
df_merged.sort_values(["doc_id", "sentence_id"])
.reset_index(drop=True)
.drop(columns=["spans_old", "spans_new", "sentence_new", "cluster"])
.rename(columns={"sentence_old": "sentence"})
)
return df_merged
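# Sketch of a typical invocation (folder names and the output path are
# hypothetical):
# old_df = get_dataframe_from_folder("brat/annotations_round1")
# new_df = get_dataframe_from_folder("brat/annotations_round2")
# merged = compare_brat_annotations(old_df, new_df)
# merged.to_json("merged_annotations.jsonl", orient="records", lines=True)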
def generate_relations_in_pwc_df(pwc_df):
pwc_df_keep = pwc_df[["s2_paper_id"] + true_entities + ["score"]].rename(
columns=map_true_entity_to_available
)
pwc_df_keep = (
pwc_df_keep[(~pwc_df_keep.duplicated()) & (pwc_df_keep.s2_paper_id != "not_found")]
.sort_values(["s2_paper_id"] + used_entities + ["score"])
.reset_index(drop=True)
)
# pwc_df_keep[used_entities] = pwc_df_keep[used_entities].applymap(lambda x: re.sub(r"[^\w-]", "_", x))
pwc_df_keep = (
pwc_df_keep.groupby("s2_paper_id")
.apply(lambda x: list(x[used_entities + ["score"]].itertuples(index=False, name="Relation")))
.reset_index()
.rename(columns={0: "Relations"})
)
return pwc_df_keep
def combine_brat_to_original_data(
pwc_doc_file,
pwc_sentence_file,
pwc_prediction_file,
original_brat_anno_folder,
| |
""" Flask server for CO2meter
(c) <NAME>, 2018
E-mail: <EMAIL>
"""
import optparse
import logging
import threading
import time
import glob
import os
import socket
import signal
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import flask
from flask import request, render_template, jsonify
import pandas as pd
import co2meter as co2
_DEFAULT_HOST = '127.0.0.1'
_DEFAULT_PORT = '1201'
_DEFAULT_INTERVAL = 30 # seconds
_DEFAULT_NAME = 'co2'
_INIT_TIME = 30 # time to initialize and calibrate device
_URL = 'https://github.com/vfilimonov/co2meter'
_COLORS = {'r': '#E81F2E', 'y': '#FAAF4C', 'g': '#7FB03F'}
_IMG_G = '1324881/36358454-d707e2f4-150e-11e8-9bd1-b479e232f28f'
_IMG_Y = '1324881/36358456-d8b513ba-150e-11e8-91eb-ade37733b19e'
_IMG_R = '1324881/36358457-da3e3e8c-150e-11e8-85af-855571275d88'
_RANGE_MID = [800, 1200]
_CO2_MAX_VALUE = 3200 # Cut our yaxis here
_name = _DEFAULT_NAME
###############################################################################
mon = None
###############################################################################
app = flask.Flask(__name__)
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
###############################################################################
@app.route('/')
def home():
# Read CO2 and temp values
if mon is None:
status = '<h1 align="center" style="color:%s;">Device is not connected</h1>' % _COLORS['r']
else:
status = ''
try:
vals = list(mon._last_data)
vals[-1] = '%.1f' % vals[-1]
except:
data = read_logs()
vals = data.split('\n')[-2].split(',')
if status == '':
status = '<h1 align="center" style="color:%s;">Device is not ready</h1>' % _COLORS['r']
# Select image and color
if int(vals[1]) >= _RANGE_MID[1]:
color = _COLORS['r']
img = _IMG_R
elif int(vals[1]) < _RANGE_MID[0]:
color = _COLORS['g']
img = _IMG_G
else:
color = _COLORS['y']
img = _IMG_Y
co2 = '<font color="%s">%s ppm</font>' % (color, vals[1])
# Return template
return render_template('index.html', image=img, timestamp=vals[0],
co2=vals[1], color=color, temp=vals[2], url=_URL,
status=status)
#############################################################################
@app.route('/log', defaults={'logname': None})
@app.route('/log/<string:logname>')
def log(logname):
data = read_logs(name=logname)
return '<h1>Full log</h1>' + wrap_table(data)
@app.route('/log.csv', defaults={'logname': None})
@app.route('/log/<string:logname>.csv')
def log_csv(logname):
data = read_logs(name=logname)
return wrap_csv(data, logname)
@app.route('/log.json', defaults={'logname': None})
@app.route('/log/<string:logname>.json')
def log_json(logname):
data = read_logs(name=logname)
return wrap_json(data)
#############################################################################
@app.route('/rename')
def get_shape_positions():
args = request.args
logging.info('rename %s', args.to_dict())
new_name = args.get('name', default=None, type=str)
if new_name is None:
return 'Error: new log name is not specified!'
global _name
_name = new_name
return 'Log name has changed to "%s"' % _name
#############################################################################
@app.route('/kill')
def shutdown():
stop_server()
global _monitoring
_monitoring = False
return 'Server shutting down...'
#############################################################################
# Dashboard on plotly.js
#############################################################################
def prepare_data(name=None, span='24H'):
data = read_logs(name)
data = pd.read_csv(StringIO(data), parse_dates=[0]).set_index('timestamp')
if span != 'FULL':
data = data.last(span)
if span == '24H':
data = data.resample('60s').mean()
elif span == '7D':
data = data.resample('600s').mean()
elif span == '30D':
data = data.resample('1H').mean()
elif span == 'FULL':
if len(data) > 3000: # Resample only long series
data = data.resample('1H').mean()
data = data.round({'co2': 0, 'temp': 1})
return data
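# Example (illustrative): prepare_data(span='7D') keeps the last seven days of
# readings, resamples them to 10-minute means and rounds 'co2' to whole ppm and
# 'temp' to one decimal place.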
def rect(y0, y1, color):
return {'type': 'rect', 'layer': 'below',
'xref': 'paper', 'x0': 0, 'x1': 1,
'yref': 'y', 'y0': y0, 'y1': y1,
'fillcolor': color, 'opacity': 0.2, 'line': {'width': 0}}
def caption(title, x, y):
return {'xref': 'paper', 'yref': 'paper', 'x': x, 'y': y, 'text': title,
'showarrow': False, 'font': {'size': 16},
'xanchor': 'center', 'yanchor': 'bottom'}
#############################################################################
@app.route("/chart/", strict_slashes=False)
@app.route("/chart/<name>", strict_slashes=False)
@app.route("/chart/<name>/<freq>", strict_slashes=False)
def chart_co2_temp(name=None, freq='24H'):
data = prepare_data(name, freq)
co2_min = min(500, data['co2'].min() - 50)
co2_max = min(max(2000, data['co2'].max() + 50), _CO2_MAX_VALUE)
t_min = min(15, data['temp'].min())
t_max = max(27, data['temp'].max())
rect_green = rect(co2_min, _RANGE_MID[0], _COLORS['g'])
rect_yellow = rect(_RANGE_MID[0], _RANGE_MID[1], _COLORS['y'])
rect_red = rect(_RANGE_MID[1], co2_max, _COLORS['r'])
# Check if mobile
try:
agent = request.headers.get('User-Agent')
phones = ['iphone', 'android', 'blackberry', 'fennec', 'iemobile']
staticPlot = any(phone in agent.lower() for phone in phones)
except RuntimeError:
staticPlot = False
# Make figure
index = data.index.format()
co2 = list(pd.np.where(data.co2.isnull(), None, data.co2))
temp = list(pd.np.where(data.temp.isnull(), None, data.temp))
d_co2 = {'mode': 'lines+markers', 'type': 'scatter',
'name': 'CO2 concentration',
'xaxis': 'x1', 'yaxis': 'y1',
'x': index, 'y': co2}
d_temp = {'mode': 'lines+markers', 'type': 'scatter',
'name': 'Temperature',
'xaxis': 'x1', 'yaxis': 'y2',
'x': index, 'y': temp}
config = {'displayModeBar': False, 'staticPlot': staticPlot}
layout = {'margin': {'l': 30, 'r': 10, 'b': 30, 't': 30},
'showlegend': False,
'shapes': [rect_green, rect_yellow, rect_red],
'xaxis1': {'domain': [0, 1], 'anchor': 'y2'},
'yaxis1': {'domain': [0.55, 1], 'anchor': 'free', 'position': 0,
'range': [co2_min, co2_max]},
'yaxis2': {'domain': [0, 0.45], 'anchor': 'x1',
'range': [t_min, t_max]},
'annotations': [caption('CO2 concentration', 0.5, 1),
caption('Temperature', 0.5, 0.45)]
}
fig = {'data': [d_co2, d_temp], 'layout': layout, 'config': config}
return jsonify(fig)
#############################################################################
@app.route("/dashboard")
def dashboard_plotly():
# Get list of files
files = glob.glob('logs/*.csv')
files = [os.path.splitext(os.path.basename(_))[0] for _ in files]
# And find selected for jinja template
files = [(_, _ == _name) for _ in files]
return render_template('dashboard.html', files=files)
#############################################################################
# Monitoring routines
#############################################################################
def read_logs(name=None):
""" read log files """
if name is None:
name = _name
with open(os.path.join('logs', name + '.csv'), 'r') as f:
data = f.read()
return data
#############################################################################
def write_to_log(vals):
""" file name for a current log """
# Create file if does not exist
fname = os.path.join('logs', _name + '.csv')
if not os.path.exists('logs'):
os.makedirs('logs')
if not os.path.isfile(fname):
with open(fname, 'a') as f:
f.write('timestamp,co2,temp\n')
# Append to file
with open(fname, 'a') as f:
f.write('%s,%d,%.1f\n' % vals)
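# Example (illustrative values): write_to_log(('2018-02-21 21:37:05', 880, 22.3))
# appends the line "2018-02-21 21:37:05,880,22.3" to logs/co2.csv when the
# default log name is in use.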
def read_co2_data():
""" A small hack to read co2 data from monitor in order to account for case
when monitor is not initialized yet
"""
global mon
if mon is None:
# Try to initialize
try:
mon = co2.CO2monitor()
# Sleep. If we read from device before it is calibrated, we'll
# get wrong values
time.sleep(_INIT_TIME)
except OSError:
return None
try:
return mon.read_data_raw(max_requests=1000)
except OSError:
# We kill the link and will require to initialize monitor again next time
mon = None
return None
def monitoring_CO2(interval):
""" Tread for monitoring / logging """
while _monitoring:
# Request concentration and temperature
vals = read_co2_data()
if vals is None:
logging.info('[%s] monitor is not connected' % co2.now())
else:
# Write to log and sleep
logging.info('[%s] %d ppm, %.1f deg C' % tuple(vals))
write_to_log(vals)
# Sleep for the next call
time.sleep(interval)
#############################################################################
def start_monitor(interval=_DEFAULT_INTERVAL):
""" Start CO2 monitoring in a thread """
logging.basicConfig(level=logging.INFO)
global _monitoring
_monitoring = True
t = threading.Thread(target=monitoring_CO2, args=(interval,))
t.start()
return t
#############################################################################
def init_homekit_target(port, host):
try:
from .homekit import start_homekit
except:
from homekit import start_homekit
global mon
while mon is None:
time.sleep(5)
logging.info('Starting homekit server')
start_homekit(mon, host=host, port=port, monitoring=False, handle_sigint=False)
def init_homekit(port, host):
# We'll start homekit once the device is connected
t = threading.Thread(target=init_homekit_target, args=(port, host, ))
t.start()
#############################################################################
# Server routines
#############################################################################
def my_ip():
""" Get my local IP address """
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80)) # Google Public DNS
return s.getsockname()[0]
def start_server_homekit():
""" Start monitoring, flask/dash server and homekit accessory """
# Based on http://flask.pocoo.org/snippets/133/
try:
from .homekit import PORT
except:
# the case of running not from the installed module
from homekit import PORT
host = my_ip()
parser = optparse.OptionParser()
parser.add_option("-H", "--host",
help="Hostname of the Flask app [default %s]" % host,
default=host)
parser.add_option("-P", "--port-flask",
help="Port for the Flask app [default %s]" % _DEFAULT_PORT,
default=_DEFAULT_PORT)
parser.add_option("-K", "--port-homekit",
help="Port for the Homekit accessory [default %s]" % PORT,
default=PORT)
parser.add_option("-N", "--name",
help="Name for the log file [default %s]" % _DEFAULT_NAME,
default=_DEFAULT_NAME)
options, _ = parser.parse_args()
global _name
_name = options.name
# Start monitoring
t_monitor = start_monitor()
# Start a thread that will initialize homekit once device is connected
init_homekit(host=options.host, port=int(options.port_homekit))
# Start server
app.run(host=options.host, port=int(options.port_flask))
#############################################################################
def start_server():
""" Runs Flask instance using command line arguments """
# Based on http://flask.pocoo.org/snippets/133/
parser = optparse.OptionParser()
parser.add_option("-H", "--host",
help="Hostname of the Flask app [default %s]" % _DEFAULT_HOST,
default=_DEFAULT_HOST)
parser.add_option("-P", "--port",
help="Port for the Flask app [default %s]" % _DEFAULT_PORT,
default=_DEFAULT_PORT)
parser.add_option("-I", "--interval",
help="Interval in seconds for CO2meter requests [default %d]" % _DEFAULT_INTERVAL,
default=_DEFAULT_INTERVAL)
parser.add_option("-N", "--name",
help="Name for the log file [default %s]" % _DEFAULT_NAME,
default=_DEFAULT_NAME)
parser.add_option("-m", "--nomonitoring",
help="No live monitoring (only flask server)",
action="store_true", dest="no_monitoring")
parser.add_option("-s", "--noserver",
help="No server (only monitoring to file)",
action="store_true", dest="no_server")
parser.add_option("-d", "--debug",
action="store_true", dest="debug",
help=optparse.SUPPRESS_HELP)
options, _ = parser.parse_args()
if options.debug and not options.no_monitoring:
parser.error("--debug option could be used only with --no_monitoring")
global _name
_name = options.name
# Start monitoring
if not options.no_monitoring:
start_monitor(interval=int(options.interval))
# Start server
if not options.no_server:
app.run(debug=options.debug, host=options.host, port=int(options.port))
def stop_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
###############################################################################
def wrap_csv(data, fname='output'):
""" Make CSV response downloadable """
if fname is None:
fname = 'log'
si = StringIO(data)
output = flask.make_response(si.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=%s.csv" % fname
output.headers["Content-type"] = "text/csv"
return output
def wrap_json(data):
""" Convert CSV to JSON and make it downloadable """
entries = [_.split(',') for _ in data.split('\n') if _ != '']
js = [{k: v for k, v in zip(['timestamp', 'co2', 'temp'], x)}
for x in entries[1:]]
return jsonify(js)
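# Example (illustrative): for a log containing the header line plus
# "2018-02-21 21:37:05,880,22.3", wrap_json responds with
# [{"timestamp": "2018-02-21 21:37:05", "co2": "880", "temp": "22.3"}]
# (values stay strings because the CSV fields are not type-converted).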
def wrap_table(data):
""" Return HTML for table """
res = ('<table><thead><tr><th>Timestamp</th><th>CO2 concentration</th>'
'<th>Temperature</th></tr></thead><tbody>')
for line in data.split('\n')[1:]:
res += '<tr>' + ''.join(['<td>%s</td>' % _ for _ in line.split(',')]) + '</tr>'
def set_pkg_desc(self, doc, text):
"""
Set the package's description.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if description already set.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_desc_set:
self.package_desc_set = True
if validations.validate_pkg_desc(text):
doc.package.description = str_from_text(text)
else:
raise SPDXValueError('Package::Description')
else:
raise CardinalityError('Package::Description')
def set_pkg_comment(self, doc, text):
"""
Set the package's comment.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if comment already set.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_comment_set:
self.package_comment_set = True
if validations.validate_pkg_comment(text):
doc.package.comment = str_from_text(text)
else:
raise SPDXValueError('Package::Comment')
else:
raise CardinalityError('Package::Comment')
def set_pkg_ext_ref_category(self, doc, category):
"""
Set the `category` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if validations.validate_pkg_ext_ref_category(category):
if (len(doc.package.pkg_ext_refs) and
doc.package.pkg_ext_refs[-1].category is None):
doc.package.pkg_ext_refs[-1].category = category
else:
doc.package.add_pkg_ext_refs(
package.ExternalPackageRef(category=category))
else:
raise SPDXValueError('ExternalRef::Category')
def set_pkg_ext_ref_type(self, doc, pkg_ext_ref_type):
"""
Set the `pkg_ext_ref_type` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if validations.validate_pkg_ext_ref_type(pkg_ext_ref_type):
if (len(doc.package.pkg_ext_refs) and
doc.package.pkg_ext_refs[-1].pkg_ext_ref_type is None):
doc.package.pkg_ext_refs[-1].pkg_ext_ref_type = pkg_ext_ref_type
else:
doc.package.add_pkg_ext_refs(package.ExternalPackageRef(
pkg_ext_ref_type=pkg_ext_ref_type))
else:
raise SPDXValueError('ExternalRef::Type')
def set_pkg_ext_ref_locator(self, doc, locator):
"""
Set the `locator` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if (len(doc.package.pkg_ext_refs) and
doc.package.pkg_ext_refs[-1].locator is None):
doc.package.pkg_ext_refs[-1].locator = locator
else:
doc.package.add_pkg_ext_refs(package.ExternalPackageRef(
locator=locator))
def add_pkg_ext_ref_comment(self, doc, comment):
"""
Set the `comment` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if not len(doc.package.pkg_ext_refs):
raise OrderError('Package::ExternalRef')
else:
if validations.validate_pkg_ext_ref_comment(comment):
doc.package.pkg_ext_refs[-1].comment = str_from_text(comment)
else:
raise SPDXValueError('ExternalRef::Comment')
def add_pkg_ext_refs(self, doc, category, pkg_ext_ref_type, locator):
self.set_pkg_ext_ref_category(doc, category)
self.set_pkg_ext_ref_type(doc, pkg_ext_ref_type)
self.set_pkg_ext_ref_locator(doc, locator)
def assert_package_exists(self):
if not self.package_set:
raise OrderError('Package')
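# Illustrative usage of the external-reference setters (a sketch; `doc` comes
# from the surrounding parser, and the category/type/locator strings below are
# examples of SPDX 2.x values rather than a reference -- the validations module
# has the final say):
# builder.add_pkg_ext_refs(doc, 'PACKAGE-MANAGER', 'purl', 'pkg:pypi/requests@2.19.1')
# builder.add_pkg_ext_ref_comment(doc, '<text>Resolved from PyPI</text>')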
class FileBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_file_stat()
def set_file_name(self, doc, name):
"""
Raise OrderError if no package defined.
"""
if self.has_package(doc):
doc.package.files.append(file.File(name))
# A file name marks the start of a new file instance.
# The builder must be reset
# FIXME: this state does not make sense
self.reset_file_stat()
return True
else:
raise OrderError('File::Name')
def set_file_spdx_id(self, doc, spdx_id):
"""
Set the file SPDX Identifier.
Raise OrderError if no package or no file defined.
Raise SPDXValueError if malformed value.
Raise CardinalityError if more than one spdx_id set.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_spdx_id_set:
self.file_spdx_id_set = True
if validations.validate_file_spdx_id(spdx_id):
self.file(doc).spdx_id = spdx_id
return True
else:
raise SPDXValueError('File::SPDXID')
else:
raise CardinalityError('File::SPDXID')
else:
raise OrderError('File::SPDXID')
def set_file_comment(self, doc, text):
"""
Raise OrderError if no package or no file defined.
Raise CardinalityError if more than one comment set.
Raise SPDXValueError if text is not free form text.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_comment_set:
self.file_comment_set = True
if validations.validate_file_comment(text):
self.file(doc).comment = str_from_text(text)
return True
else:
raise SPDXValueError('File::Comment')
else:
raise CardinalityError('File::Comment')
else:
raise OrderError('File::Comment')
def set_file_type(self, doc, type_value):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if more than one type set.
Raise SPDXValueError if type is unknown.
"""
type_dict = {
'SOURCE': file.FileType.SOURCE,
'BINARY': file.FileType.BINARY,
'ARCHIVE': file.FileType.ARCHIVE,
'OTHER': file.FileType.OTHER
}
if self.has_package(doc) and self.has_file(doc):
if not self.file_type_set:
self.file_type_set = True
if type_value in type_dict.keys():
self.file(doc).type = type_dict[type_value]
return True
else:
raise SPDXValueError('File::Type')
else:
raise CardinalityError('File::Type')
else:
raise OrderError('File::Type')
def set_file_chksum(self, doc, chksum):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if more than one chksum set.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_chksum_set:
self.file_chksum_set = True
self.file(doc).chk_sum = checksum_from_sha1(chksum)
return True
else:
raise CardinalityError('File::CheckSum')
else:
raise OrderError('File::CheckSum')
def set_concluded_license(self, doc, lic):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if already set.
Raise SPDXValueError if malformed.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_conc_lics_set:
self.file_conc_lics_set = True
if validations.validate_lics_conc(lic):
self.file(doc).conc_lics = lic
return True
else:
raise SPDXValueError('File::ConcludedLicense')
else:
raise CardinalityError('File::ConcludedLicense')
else:
raise OrderError('File::ConcludedLicense')
def set_file_license_in_file(self, doc, lic):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if malformed value.
"""
if self.has_package(doc) and self.has_file(doc):
if validations.validate_file_lics_in_file(lic):
self.file(doc).add_lics(lic)
return True
else:
raise SPDXValueError('File::LicenseInFile')
else:
raise OrderError('File::LicenseInFile')
def set_file_license_comment(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if more than one per file.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_license_comment_set:
self.file_license_comment_set = True
if validations.validate_file_lics_comment(text):
self.file(doc).license_comment = str_from_text(text)
else:
raise SPDXValueError('File::LicenseComment')
else:
raise CardinalityError('File::LicenseComment')
else:
raise OrderError('File::LicenseComment')
def set_file_copyright(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if not free form text or NONE or NO_ASSERT.
Raise CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_copytext_set:
self.file_copytext_set = True
if validations.validate_file_cpyright(text):
if isinstance(text, string_types):
self.file(doc).copyright = str_from_text(text)
else:
self.file(doc).copyright = text # None or NoAssert
return True
else:
raise SPDXValueError('File::CopyRight')
else:
raise CardinalityError('File::CopyRight')
else:
raise OrderError('File::CopyRight')
def set_file_notice(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if not free form text.
Raise CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_notice_set:
self.file_notice_set = True
if validations.validate_file_notice(text):
self.file(doc).notice = str_from_text(text)
else:
raise SPDXValueError('File::Notice')
else:
raise CardinalityError('File::Notice')
else:
raise OrderError('File::Notice')
def add_file_contribution(self, doc, value):
"""
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_contrib(value)
else:
raise OrderError('File::Contributor')
def add_file_dep(self, doc, value):
"""
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_depend(value)
else:
raise OrderError('File::Dependency')
def set_file_atrificat_of_project(self, doc, symbol, value):
"""
Set a file name, uri or home artificat.
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_artifact(symbol, value)
else:
raise OrderError('File::Artificat')
def file(self, doc):
"""
Return the last file in the document's package's file list.
"""
return doc.package.files[-1]
def has_file(self, doc):
"""
Return true if the document's package has at least one file.
Does not test if the document has a package.
"""
return len(doc.package.files) != 0
def has_package(self, doc):
"""
Return true if the document has a package.
"""
return doc.package is not None
def reset_file_stat(self):
"""
Reset the builder's state to enable building new files.
"""
# FIXME: this state does not make sense
self.file_spdx_id_set = False
self.file_comment_set = False
self.file_type_set = False
self.file_chksum_set = False
self.file_conc_lics_set = False
self.file_license_comment_set = False
self.file_notice_set = False
self.file_copytext_set = False
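# Illustrative call order (a sketch; `doc` is assumed to already hold a package
# created elsewhere, otherwise every setter below raises OrderError):
# fb = FileBuilder()
# fb.set_file_name(doc, 'src/main.c')            # starts a new file entry
# fb.set_file_spdx_id(doc, 'SPDXRef-File-main')  # allowed once per file
# fb.set_file_type(doc, 'SOURCE')
# fb.set_file_type(doc, 'BINARY')                # second call raises CardinalityError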
class LicenseBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_extr_lics()
def extr_lic(self, doc):
"""
Retrieve last license in extracted license list.
"""
return doc.extracted_licenses[-1]
def has_extr_lic(self, doc):
return len(doc.extracted_licenses) != 0
def set_lic_id(self, doc, lic_id):
"""
Add a new extracted license to the document.
Raise SPDXValueError if data format is incorrect.
"""
# FIXME: this state does not make sense
self.reset_extr_lics()
if validations.validate_extracted_lic_id(lic_id):
doc.add_extr_lic(document.ExtractedLicense(lic_id))
return True
else:
raise SPDXValueError('ExtractedLicense::id')
def set_lic_text(self, doc, text):
"""
Set license extracted text.
Raise SPDXValueError if text is not free form text.
Raise OrderError if no license ID defined.
"""
if self.has_extr_lic(doc):
if not self.extr_text_set:
self.extr_text_set = True
if validations.validate_is_free_form_text(text):
self.extr_lic(doc).text = str_from_text(text)
return True
else:
raise SPDXValueError('ExtractedLicense::text')
else:
raise CardinalityError('ExtractedLicense::text')
else:
raise OrderError('ExtractedLicense::text')
def set_lic_name(self, doc, name):
"""
Set license name.
Raise SPDXValueError if name is not str or utils.NoAssert
Raise OrderError if no license id defined.
"""
if self.has_extr_lic(doc):
if not self.extr_lic_name_set:
self.extr_lic_name_set = True
if validations.validate_extr_lic_name(name):
self.extr_lic(doc).full_name = name
return True
else:
raise SPDXValueError('ExtractedLicense::Name')
else:
raise CardinalityError('ExtractedLicense::Name')
else:
raise OrderError('ExtractedLicense::Name')
def set_lic_comment(self, doc, comment):
"""
Set license comment.
Raise SPDXValueError if comment is not free form text.
Raise OrderError if no license ID defined.
"""
if self.has_extr_lic(doc):
if not self.extr_lic_comment_set:
self.extr_lic_comment_set = True
if validations.validate_is_free_form_text(comment):
self.extr_lic(doc).comment = str_from_text(comment)
return True
else:
raise SPDXValueError('ExtractedLicense::comment')
else:
raise CardinalityError('ExtractedLicense::comment')
else:
raise OrderError('ExtractedLicense::comment')
def add_lic_xref(self, doc, ref):
"""
Add a license cross reference.
Raise OrderError if no License ID defined.
"""
if self.has_extr_lic(doc):
self.extr_lic(doc).add_xref(ref)
return True
else:
raise OrderError('ExtractedLicense::CrossRef')
def reset_extr_lics(self):
# FIXME: this state does not make sense
self.extr_text_set = False
self.extr_lic_name_set = False
self.extr_lic_comment_set = False
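# Illustrative flow (a sketch): set_lic_id must come first because it appends
# the ExtractedLicense object that the later setters fill in:
# lb = LicenseBuilder()
# lb.set_lic_id(doc, 'LicenseRef-1')
# lb.set_lic_text(doc, '<text>Custom license text ...</text>')
# lb.set_lic_name(doc, 'My Custom License')
# lb.add_lic_xref(doc, 'http://example.com/licenses/custom')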
class SnippetBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_snippet()
def create_snippet(self, doc, spdx_id):
"""
Create a snippet for the SPDX Document.
spdx_id - To uniquely identify any element in an SPDX document which
may be referenced by other elements.
Raise SPDXValueError if the data is a malformed value.
"""
self.reset_snippet()
spdx_id = spdx_id.split('#')[-1]
if validations.validate_snippet_spdx_id(spdx_id):
doc.add_snippet(snippet.Snippet(spdx_id=spdx_id))
self.snippet_spdx_id_set = True
return True
| |
None,
interchange_control_number_prefix: Optional[str] = None,
interchange_control_number_suffix: Optional[str] = None,
sender_reverse_routing_address: Optional[str] = None,
receiver_reverse_routing_address: Optional[str] = None,
functional_group_id: Optional[str] = None,
group_controlling_agency_code: Optional[str] = None,
group_message_version: Optional[str] = None,
group_message_release: Optional[str] = None,
group_control_number_prefix: Optional[str] = None,
group_control_number_suffix: Optional[str] = None,
group_application_receiver_qualifier: Optional[str] = None,
group_application_receiver_id: Optional[str] = None,
group_application_sender_qualifier: Optional[str] = None,
group_application_sender_id: Optional[str] = None,
group_application_password: Optional[str] = None,
transaction_set_control_number_prefix: Optional[str] = None,
transaction_set_control_number_suffix: Optional[str] = None,
sender_internal_identification: Optional[str] = None,
sender_internal_sub_identification: Optional[str] = None,
receiver_internal_identification: Optional[str] = None,
receiver_internal_sub_identification: Optional[str] = None,
**kwargs
):
super(EdifactEnvelopeSettings, self).__init__(**kwargs)
self.group_association_assigned_code = group_association_assigned_code
self.communication_agreement_id = communication_agreement_id
self.apply_delimiter_string_advice = apply_delimiter_string_advice
self.create_grouping_segments = create_grouping_segments
self.enable_default_group_headers = enable_default_group_headers
self.recipient_reference_password_value = recipient_reference_password_value
self.recipient_reference_password_qualifier = recipient_reference_password_qualifier
self.application_reference_id = application_reference_id
self.processing_priority_code = processing_priority_code
self.interchange_control_number_lower_bound = interchange_control_number_lower_bound
self.interchange_control_number_upper_bound = interchange_control_number_upper_bound
self.rollover_interchange_control_number = rollover_interchange_control_number
self.interchange_control_number_prefix = interchange_control_number_prefix
self.interchange_control_number_suffix = interchange_control_number_suffix
self.sender_reverse_routing_address = sender_reverse_routing_address
self.receiver_reverse_routing_address = receiver_reverse_routing_address
self.functional_group_id = functional_group_id
self.group_controlling_agency_code = group_controlling_agency_code
self.group_message_version = group_message_version
self.group_message_release = group_message_release
self.group_control_number_lower_bound = group_control_number_lower_bound
self.group_control_number_upper_bound = group_control_number_upper_bound
self.rollover_group_control_number = rollover_group_control_number
self.group_control_number_prefix = group_control_number_prefix
self.group_control_number_suffix = group_control_number_suffix
self.group_application_receiver_qualifier = group_application_receiver_qualifier
self.group_application_receiver_id = group_application_receiver_id
self.group_application_sender_qualifier = group_application_sender_qualifier
self.group_application_sender_id = group_application_sender_id
self.group_application_password = group_application_password
self.overwrite_existing_transaction_set_control_number = overwrite_existing_transaction_set_control_number
self.transaction_set_control_number_prefix = transaction_set_control_number_prefix
self.transaction_set_control_number_suffix = transaction_set_control_number_suffix
self.transaction_set_control_number_lower_bound = transaction_set_control_number_lower_bound
self.transaction_set_control_number_upper_bound = transaction_set_control_number_upper_bound
self.rollover_transaction_set_control_number = rollover_transaction_set_control_number
self.is_test_interchange = is_test_interchange
self.sender_internal_identification = sender_internal_identification
self.sender_internal_sub_identification = sender_internal_sub_identification
self.receiver_internal_identification = receiver_internal_identification
self.receiver_internal_sub_identification = receiver_internal_sub_identification
class EdifactFramingSettings(msrest.serialization.Model):
"""The Edifact agreement framing settings.
All required parameters must be populated in order to send to Azure.
:param service_code_list_directory_version: The service code list directory version.
:type service_code_list_directory_version: str
:param character_encoding: The character encoding.
:type character_encoding: str
:param protocol_version: Required. The protocol version.
:type protocol_version: int
:param data_element_separator: Required. The data element separator.
:type data_element_separator: int
:param component_separator: Required. The component separator.
:type component_separator: int
:param segment_terminator: Required. The segment terminator.
:type segment_terminator: int
:param release_indicator: Required. The release indicator.
:type release_indicator: int
:param repetition_separator: Required. The repetition separator.
:type repetition_separator: int
:param character_set: Required. The EDIFACT frame setting characterSet. Possible values
include: "NotSpecified", "UNOB", "UNOA", "UNOC", "UNOD", "UNOE", "UNOF", "UNOG", "UNOH",
"UNOI", "UNOJ", "UNOK", "UNOX", "UNOY", "KECA".
:type character_set: str or ~azure.mgmt.logic.models.EdifactCharacterSet
:param decimal_point_indicator: Required. The EDIFACT frame setting decimal indicator. Possible
values include: "NotSpecified", "Comma", "Decimal".
:type decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator
:param segment_terminator_suffix: Required. The EDIFACT frame setting segment terminator
suffix. Possible values include: "NotSpecified", "None", "CR", "LF", "CRLF".
:type segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
"""
_validation = {
'protocol_version': {'required': True},
'data_element_separator': {'required': True},
'component_separator': {'required': True},
'segment_terminator': {'required': True},
'release_indicator': {'required': True},
'repetition_separator': {'required': True},
'character_set': {'required': True},
'decimal_point_indicator': {'required': True},
'segment_terminator_suffix': {'required': True},
}
_attribute_map = {
'service_code_list_directory_version': {'key': 'serviceCodeListDirectoryVersion', 'type': 'str'},
'character_encoding': {'key': 'characterEncoding', 'type': 'str'},
'protocol_version': {'key': 'protocolVersion', 'type': 'int'},
'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
'component_separator': {'key': 'componentSeparator', 'type': 'int'},
'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
'release_indicator': {'key': 'releaseIndicator', 'type': 'int'},
'repetition_separator': {'key': 'repetitionSeparator', 'type': 'int'},
'character_set': {'key': 'characterSet', 'type': 'str'},
'decimal_point_indicator': {'key': 'decimalPointIndicator', 'type': 'str'},
'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'str'},
}
def __init__(
self,
*,
protocol_version: int,
data_element_separator: int,
component_separator: int,
segment_terminator: int,
release_indicator: int,
repetition_separator: int,
character_set: Union[str, "EdifactCharacterSet"],
decimal_point_indicator: Union[str, "EdifactDecimalIndicator"],
segment_terminator_suffix: Union[str, "SegmentTerminatorSuffix"],
service_code_list_directory_version: Optional[str] = None,
character_encoding: Optional[str] = None,
**kwargs
):
super(EdifactFramingSettings, self).__init__(**kwargs)
self.service_code_list_directory_version = service_code_list_directory_version
self.character_encoding = character_encoding
self.protocol_version = protocol_version
self.data_element_separator = data_element_separator
self.component_separator = component_separator
self.segment_terminator = segment_terminator
self.release_indicator = release_indicator
self.repetition_separator = repetition_separator
self.character_set = character_set
self.decimal_point_indicator = decimal_point_indicator
self.segment_terminator_suffix = segment_terminator_suffix
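# A hedged construction sketch for the framing settings above. The separator values are
# the character codes conventionally used for EDIFACT delimiters ("+", ":", "'", "?", "*");
# they are illustrative defaults, not values mandated by this SDK.
def _example_edifact_framing_settings() -> "EdifactFramingSettings":
    """Illustrative only: build framing settings with conventional delimiter codes."""
    return EdifactFramingSettings(
        protocol_version=4,
        data_element_separator=43,   # '+'
        component_separator=58,      # ':'
        segment_terminator=39,       # "'"
        release_indicator=63,        # '?'
        repetition_separator=42,     # '*'
        character_set="UNOC",
        decimal_point_indicator="Comma",
        segment_terminator_suffix="CR",
    )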
class EdifactMessageFilter(msrest.serialization.Model):
"""The Edifact message filter for odata query.
All required parameters must be populated in order to send to Azure.
:param message_filter_type: Required. The message filter type. Possible values include:
"NotSpecified", "Include", "Exclude".
:type message_filter_type: str or ~azure.mgmt.logic.models.MessageFilterType
"""
_validation = {
'message_filter_type': {'required': True},
}
_attribute_map = {
'message_filter_type': {'key': 'messageFilterType', 'type': 'str'},
}
def __init__(
self,
*,
message_filter_type: Union[str, "MessageFilterType"],
**kwargs
):
super(EdifactMessageFilter, self).__init__(**kwargs)
self.message_filter_type = message_filter_type
class EdifactMessageIdentifier(msrest.serialization.Model):
"""The Edifact message identifier.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id on which this envelope settings has to be applied.
:type message_id: str
"""
_validation = {
'message_id': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
}
def __init__(
self,
*,
message_id: str,
**kwargs
):
super(EdifactMessageIdentifier, self).__init__(**kwargs)
self.message_id = message_id
class EdifactOneWayAgreement(msrest.serialization.Model):
"""The Edifact one way agreement.
All required parameters must be populated in order to send to Azure.
:param sender_business_identity: Required. The sender business identity.
:type sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param receiver_business_identity: Required. The receiver business identity.
:type receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param protocol_settings: Required. The EDIFACT protocol settings.
:type protocol_settings: ~azure.mgmt.logic.models.EdifactProtocolSettings
"""
_validation = {
'sender_business_identity': {'required': True},
'receiver_business_identity': {'required': True},
'protocol_settings': {'required': True},
}
_attribute_map = {
'sender_business_identity': {'key': 'senderBusinessIdentity', 'type': 'BusinessIdentity'},
'receiver_business_identity': {'key': 'receiverBusinessIdentity', 'type': 'BusinessIdentity'},
'protocol_settings': {'key': 'protocolSettings', 'type': 'EdifactProtocolSettings'},
}
def __init__(
self,
*,
sender_business_identity: "BusinessIdentity",
receiver_business_identity: "BusinessIdentity",
protocol_settings: "EdifactProtocolSettings",
**kwargs
):
super(EdifactOneWayAgreement, self).__init__(**kwargs)
self.sender_business_identity = sender_business_identity
self.receiver_business_identity = receiver_business_identity
self.protocol_settings = protocol_settings
class EdifactProcessingSettings(msrest.serialization.Model):
"""The Edifact agreement protocol settings.
All required parameters must be populated in order to send to Azure.
:param mask_security_info: Required. The value indicating whether to mask security information.
:type mask_security_info: bool
:param preserve_interchange: Required. The value indicating whether to preserve interchange.
:type preserve_interchange: bool
:param suspend_interchange_on_error: Required. The value indicating whether to suspend
interchange on error.
:type suspend_interchange_on_error: bool
:param create_empty_xml_tags_for_trailing_separators: Required. The value indicating whether to
create empty xml tags for trailing separators.
:type create_empty_xml_tags_for_trailing_separators: bool
:param use_dot_as_decimal_separator: Required. The value indicating whether to use dot as
decimal separator.
:type use_dot_as_decimal_separator: bool
"""
_validation = {
'mask_security_info': {'required': True},
'preserve_interchange': {'required': True},
'suspend_interchange_on_error': {'required': True},
'create_empty_xml_tags_for_trailing_separators': {'required': True},
'use_dot_as_decimal_separator': {'required': True},
}
_attribute_map = {
'mask_security_info': {'key': 'maskSecurityInfo', 'type': 'bool'},
'preserve_interchange': {'key': 'preserveInterchange', 'type': 'bool'},
'suspend_interchange_on_error': {'key': 'suspendInterchangeOnError', 'type': 'bool'},
'create_empty_xml_tags_for_trailing_separators': {'key': 'createEmptyXmlTagsForTrailingSeparators', 'type': 'bool'},
'use_dot_as_decimal_separator': {'key': 'useDotAsDecimalSeparator', 'type': 'bool'},
}
def __init__(
self,
*,
mask_security_info: bool,
preserve_interchange: bool,
suspend_interchange_on_error: bool,
create_empty_xml_tags_for_trailing_separators: bool,
use_dot_as_decimal_separator: bool,
**kwargs
):
super(EdifactProcessingSettings, self).__init__(**kwargs)
self.mask_security_info = mask_security_info
self.preserve_interchange = preserve_interchange
self.suspend_interchange_on_error = suspend_interchange_on_error
self.create_empty_xml_tags_for_trailing_separators = create_empty_xml_tags_for_trailing_separators
self.use_dot_as_decimal_separator = use_dot_as_decimal_separator
class EdifactProtocolSettings(msrest.serialization.Model):
"""The Edifact agreement protocol settings.
All required parameters must be populated in order to send to Azure.
:param validation_settings: Required. The EDIFACT validation settings.
:type validation_settings: ~azure.mgmt.logic.models.EdifactValidationSettings
:param framing_settings: Required. The EDIFACT framing settings.
:type framing_settings: ~azure.mgmt.logic.models.EdifactFramingSettings
:param envelope_settings: Required. The EDIFACT envelope settings.
:type envelope_settings: ~azure.mgmt.logic.models.EdifactEnvelopeSettings
:param acknowledgement_settings: Required. The EDIFACT acknowledgement settings.
:type acknowledgement_settings: ~azure.mgmt.logic.models.EdifactAcknowledgementSettings
:param message_filter: Required. The EDIFACT message filter.
:type message_filter: ~azure.mgmt.logic.models.EdifactMessageFilter
:param processing_settings: Required. The EDIFACT processing Settings.
:type processing_settings: ~azure.mgmt.logic.models.EdifactProcessingSettings
:param envelope_overrides: The EDIFACT envelope override settings.
:type envelope_overrides: list[~azure.mgmt.logic.models.EdifactEnvelopeOverride]
:param message_filter_list: The EDIFACT message filter list.
:type message_filter_list: list[~azure.mgmt.logic.models.EdifactMessageIdentifier]
:param schema_references: Required. The EDIFACT schema references.
:type schema_references: list[~azure.mgmt.logic.models.EdifactSchemaReference]
:param validation_overrides: The EDIFACT validation override settings.
:type validation_overrides: list[~azure.mgmt.logic.models.EdifactValidationOverride]
:param edifact_delimiter_overrides: The EDIFACT delimiter override settings.
:type edifact_delimiter_overrides: list[~azure.mgmt.logic.models.EdifactDelimiterOverride]
"""
_validation = {
'validation_settings': {'required': True},
'framing_settings': {'required': True},
'envelope_settings': {'required': True},
'acknowledgement_settings': {'required': True},
'message_filter': {'required': True},
'processing_settings': {'required': True},
'schema_references': {'required': True},
}
_attribute_map = {
'validation_settings': {'key': 'validationSettings', 'type': 'EdifactValidationSettings'},
'framing_settings': {'key': 'framingSettings', 'type': 'EdifactFramingSettings'},
'envelope_settings': {'key': 'envelopeSettings', 'type': 'EdifactEnvelopeSettings'},
'acknowledgement_settings': {'key': 'acknowledgementSettings', 'type': 'EdifactAcknowledgementSettings'},
'message_filter': {'key': 'messageFilter', 'type': 'EdifactMessageFilter'},
'processing_settings': {'key': 'processingSettings', 'type': 'EdifactProcessingSettings'},
'envelope_overrides': {'key': 'envelopeOverrides', 'type': '[EdifactEnvelopeOverride]'},
'message_filter_list': {'key': 'messageFilterList', 'type': '[EdifactMessageIdentifier]'},
'schema_references': {'key': 'schemaReferences', 'type': '[EdifactSchemaReference]'},
'validation_overrides': {'key': 'validationOverrides', 'type': '[EdifactValidationOverride]'},
'edifact_delimiter_overrides': {'key': 'edifactDelimiterOverrides', 'type': '[EdifactDelimiterOverride]'},
}
def __init__(
self,
*,
validation_settings: "EdifactValidationSettings",
framing_settings: "EdifactFramingSettings",
envelope_settings: "EdifactEnvelopeSettings",
acknowledgement_settings: "EdifactAcknowledgementSettings",
message_filter: "EdifactMessageFilter",
processing_settings: "EdifactProcessingSettings",
schema_references: List["EdifactSchemaReference"],
envelope_overrides: Optional[List["EdifactEnvelopeOverride"]] = None,
message_filter_list: Optional[List["EdifactMessageIdentifier"]] = None,
validation_overrides: Optional[List["EdifactValidationOverride"]] = None,
edifact_delimiter_overrides: Optional[List["EdifactDelimiterOverride"]] = None,
**kwargs
):
super(EdifactProtocolSettings, self).__init__(**kwargs)
self.validation_settings = validation_settings
self.framing_settings = framing_settings
self.envelope_settings = envelope_settings
self.acknowledgement_settings = acknowledgement_settings
self.message_filter = message_filter
self.processing_settings = processing_settings
self.envelope_overrides = envelope_overrides
self.message_filter_list = message_filter_list
self.schema_references = schema_references
self.validation_overrides = validation_overrides
self.edifact_delimiter_overrides = edifact_delimiter_overrides
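# EdifactProtocolSettings is a composite of the sub-models in this module. A hedged
# sketch of how the required pieces fit together; the validation, framing, envelope,
# acknowledgement and processing settings are built elsewhere and passed in here, since
# their constructors take many required fields of their own.
def _example_edifact_protocol_settings(validation, framing, envelope, acks, processing, schemas):
    """Illustrative only: wire already-built sub-settings into EdifactProtocolSettings."""
    return EdifactProtocolSettings(
        validation_settings=validation,
        framing_settings=framing,
        envelope_settings=envelope,
        acknowledgement_settings=acks,
        message_filter=EdifactMessageFilter(message_filter_type="Include"),
        processing_settings=processing,
        schema_references=schemas,
    )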
class EdifactSchemaReference(msrest.serialization.Model):
"""The Edifact schema reference.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id.
:type message_id: str
:param message_version: Required. The message version.
:type message_version: str
:param message_release: Required. The message release version.
:type message_release: str
# limit memory usage..
import glob
import logging
import os
import cv2
import numpy as np
import pandas
# limit memory usage..
from keras import backend as K
from keras.layers import Input, Convolution3D, MaxPooling3D, Flatten, AveragePooling3D
from keras.metrics import binary_accuracy, binary_crossentropy, mean_absolute_error
from keras.models import Model
from keras.optimizers import SGD
from ...preprocess.lung_segmentation import rescale_patient_images
try:
from ....config import Config
except ValueError:
from config import Config
CUBE_SIZE = 32
CROP_SIZE = 32
MEAN_PIXEL_VALUE = 41
EXTRACTED_IMAGE_DIR = Config.EXTRACTED_IMAGE_DIR
NODULE_DETECTION_DIR = "data/detections/"
K.set_image_dim_ordering("tf")
POS_WEIGHT = 2
NEGS_PER_POS = 20
P_TH = 0.6
LEARN_RATE = 0.001
PREDICT_STEP = 12
BATCH_SIZE = 128
STEP = PREDICT_STEP
def load_patient_images(patient_id, base_dir=EXTRACTED_IMAGE_DIR, wildcard="*.*", exclude_wildcards=None):
exclude_wildcards = exclude_wildcards or []
src_dir = os.path.join(os.getcwd(), base_dir, patient_id)
src_img_paths = glob.glob(src_dir + wildcard)
for exclude_wildcard in exclude_wildcards:
exclude_img_paths = glob.glob(src_dir + exclude_wildcard)
src_img_paths = [im for im in src_img_paths if im not in exclude_img_paths]
src_img_paths.sort()
images = [cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) for img_path in src_img_paths]
images = [im.reshape((1,) + im.shape) for im in images]
res = np.vstack(images)
return res
def prepare_image_for_net3D(img):
img = img.astype(np.float32)
img -= MEAN_PIXEL_VALUE
img /= 255.
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2], 1)
return img
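# Hedged shape check for the helper above: a CUBE_SIZE**3 grayscale cube is normalised
# and reshaped into the (batch, z, y, x, channels) layout the 3D net expects.
def _example_net_input():
    """Illustrative only; not called by the prediction pipeline."""
    cube = np.zeros((CUBE_SIZE, CUBE_SIZE, CUBE_SIZE), dtype=np.uint8)
    net_in = prepare_image_for_net3D(cube)
    return net_in.shape  # (1, CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)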
def filter_patient_nodules_predictions(df_nodule_predictions: pandas.DataFrame, patient_id, view_size):
patient_mask = load_patient_images(patient_id, wildcard="*_m.png")
delete_indices = []
for index, row in df_nodule_predictions.iterrows():
z_perc = row["coord_z"]
y_perc = row["coord_y"]
center_x = int(round(row["coord_x"] * patient_mask.shape[2]))
center_y = int(round(y_perc * patient_mask.shape[1]))
center_z = int(round(z_perc * patient_mask.shape[0]))
mal_score = row["diameter_mm"]
start_y = center_y - view_size / 2
start_x = center_x - view_size / 2
nodule_in_mask = False
for z_index in [-1, 0, 1]:
img = patient_mask[z_index + center_z]
start_x = int(start_x)
start_y = int(start_y)
view_size = int(view_size)
img_roi = img[start_y:start_y + view_size, start_x:start_x + view_size]
if img_roi.sum() > 255: # more than 1 pixel of mask.
nodule_in_mask = True
if not nodule_in_mask:
logging.info("Nodule not in mask: ", (center_x, center_y, center_z))
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
else:
if center_z < 30:
logging.info("Z < 30: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
if (z_perc > 0.75 or z_perc < 0.25) and y_perc > 0.85:
logging.info("SUSPICIOUS FALSEPOSITIVE: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if center_z < 50 and y_perc < 0.30:
logging.info("SUSPICIOUS FALSEPOSITIVE OUT OF RANGE: ", patient_id, " center z:", center_z, " y_perc: ",
y_perc)
df_nodule_predictions.drop(df_nodule_predictions.index[delete_indices], inplace=True)
return df_nodule_predictions
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None) -> Model:
"""Load the pre-trained 3D ConvNet that should be used to predict a nodule and its malignancy.
Args:
input_shape: shape of the input layer. Defaults to (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1).
load_weight_path: path of the trained model weights.
Returns:
keras.models.Model
"""
inputs = Input(shape=input_shape, name="input_1")
x = inputs
x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), padding="same")(x)
x = Convolution3D(64, (3, 3, 3), activation='relu', padding='same', name='conv1', strides=(1, 1, 1))(x)
x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')(x)
# 2nd layer group
x = Convolution3D(128, (3, 3, 3), activation='relu', padding='same', name='conv2', strides=(1, 1, 1))(x)
x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')(x)
# 3rd layer group
x = Convolution3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3a', strides=(1, 1, 1))(x)
x = Convolution3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3b', strides=(1, 1, 1))(x)
x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')(x)
# 4th layer group
x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4a', strides=(1, 1, 1))(x)
x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4b', strides=(1, 1, 1), )(x)
x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')(x)
last64 = Convolution3D(64, (2, 2, 2), activation="relu", name="last_64")(x)
out_class = Convolution3D(1, (1, 1, 1), activation="sigmoid", name="out_class_last")(last64)
out_class = Flatten(name="out_class")(out_class)
out_malignancy = Convolution3D(1, (1, 1, 1), activation=None, name="out_malignancy_last")(last64)
out_malignancy = Flatten(name="out_malignancy")(out_malignancy)
model = Model(inputs=inputs, outputs=[out_class, out_malignancy])
model.load_weights(load_weight_path)
model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error},
metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})
return model
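# Hedged usage sketch for get_net: the weights path below is an assumed placeholder, not
# a file shipped with this code. The returned model produces two outputs per cube, the
# nodule chance ("out_class") and the malignancy regression ("out_malignancy").
def _example_load_detector(weights_path="models/example_weights.hd5"):
    """Illustrative only: weights_path must point at an existing trained-weights file."""
    return get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=weights_path)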
def prepare_data(patient_id, magnification=1):
"""By a given patient ID prepare_data returns three np.ndarray:
a 3D image array, a mask and a placeholder for a predict values.
Args:
patient_id: SeriesInstanceUID of the patient.
magnification: what magnification for the model to use, one of (1, 1.5, 2).
Returns:
np.ndarray a 3D image array.
np.ndarray a mask with a shape of the 3D image array.
np.ndarray a placeholder for a predict values.
"""
patient_img = load_patient_images(patient_id, wildcard="*_i.png", exclude_wildcards=[])
if magnification != 1:
patient_img = rescale_patient_images(patient_img, (1, 1, 1), magnification)
patient_mask = load_patient_images(patient_id, wildcard="*_m.png", exclude_wildcards=[])
if magnification != 1:
patient_mask = rescale_patient_images(patient_mask, (1, 1, 1), magnification, is_mask_image=True)
predict_volume_shape_list = [0, 0, 0]
for dim in range(3):
dim_indent = 0
while dim_indent + CROP_SIZE < patient_img.shape[dim]:
predict_volume_shape_list[dim] += 1
dim_indent += STEP
predict_volume_shape = (predict_volume_shape_list[0],
predict_volume_shape_list[1],
predict_volume_shape_list[2])
predict_volume = np.zeros(shape=predict_volume_shape, dtype=float)
return patient_img, patient_mask, predict_volume
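# Worked example of the sliding-window bookkeeping above: with STEP = 12 and CROP_SIZE = 32
# the window start advances in steps of 12 while start + 32 must stay inside the image, so
# a (120, 340, 340) scan yields a predict_volume of shape (8, 26, 26).
def _n_windows(dim_size, crop=CROP_SIZE, step=STEP):
    """Illustrative helper mirroring the loop in prepare_data (not used by it)."""
    count, start = 0, 0
    while start + crop < dim_size:
        count += 1
        start += step
    return count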
def predict_cubes(model_path, patient_id, magnification=1, ext_name=""):
"""Return a DataFrame including position, diameter and chance of abnormal tissue to be a nodule.
Args:
model_path: path to the pre-trained model that should be used for the prediction
patient_id: SeriesInstanceUID of the patient
magnification: the magnification the model should use, one of (1, 1.5, 2)
ext_name: external name of the model, one of ("luna16_fs", "luna_posnegndsb_v")
Returns:
dict: a dictionary containing anno_index, coord_x, coord_y, coord_z, diameter, nodule_chance, diameter_mm
of each found nodule for each patient, of the form::
{
patient_id (str): pandas.DataFrame,
..
}
"""
dst_dir = NODULE_DETECTION_DIR
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
dst_dir = os.path.join(dst_dir, "predictions" + str(int(magnification * 10)) + "_" + ext_name)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
model = get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1),
load_weight_path=model_path)
patients_dfs = {}
patient_ids = [patient_id]
# In the original Julian de Wit implementation `os.listdir` was used to extract
# all subdirectories from `EXTRACTED_IMAGE_DIR`. Ordering wasn't needed there,
# since each `base_name` represents a different patient directory.
# In the adapted version (see PR #118), the `return df` statement returned a data frame
# only for the last patient. As that was not the original behaviour, PR #172 corrected
# it to store every patient's data frame in the `patients_dfs` dictionary, which is
# returned instead.
for base_name in os.listdir(EXTRACTED_IMAGE_DIR):
if os.path.isdir(os.path.join(EXTRACTED_IMAGE_DIR, base_name)):
patient_ids.append(base_name)
for patient_index, patient_id in enumerate(reversed(patient_ids)):
logging.info("%s: %s", patient_index, patient_id)
patient_img, patient_mask, predict_volume = prepare_data(patient_id, magnification)
patient_predictions_csv = annotate(model, predict_volume, patient_img, patient_mask)
df = pandas.DataFrame(patient_predictions_csv,
columns=["anno_index", "coord_x", "coord_y", "coord_z",
"diameter", "nodule_chance", "diameter_mm"])
filter_patient_nodules_predictions(df, patient_id, CROP_SIZE * magnification)
patients_dfs[patient_id] = df
return patients_dfs
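# Hedged usage sketch: the model path and SeriesInstanceUID below are placeholder values.
# predict_cubes also walks every patient directory found under EXTRACTED_IMAGE_DIR and
# returns one DataFrame per patient, keyed by patient id.
def _example_detect(model_path="models/example_detector.hd5", patient_uid="1.2.3.4"):
    """Illustrative only: both arguments are assumed example values."""
    return predict_cubes(model_path, patient_uid, magnification=1, ext_name="luna16_fs")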
def annotate(model, predict_volume, patient_img, patient_mask):
"""Return a DataFrame including position, diameter and chance of abnormal tissue to be a nodule.
By a given model and a volumetric data.
Args:
model: 3D ConvNet that should be used to predict a nodule and its malignancy.
predict_volume:
patient_img:
patient_mask:
Returns:
pandas.DataFrame containing anno_index, coord_x, coord_y, coord_z, diameter, nodule_chance, diameter_mm
of each found nodule.
"""
done_count = 0
skipped_count = 0
annotation_index = 0
batch_list = []
batch_list_coords = []
patient_predictions_csv = []
logging.info("Predicted Volume Shape:" + str(predict_volume.shape))
for z, y, x in np.ndindex(predict_volume.shape[:3]):
# if cube_img is None:
cube_img = patient_img[z * STEP: z * STEP + CROP_SIZE,
y * STEP: y * STEP + CROP_SIZE,
x * STEP: x * STEP + CROP_SIZE]
cube_mask = patient_mask[z * STEP: z * STEP + CROP_SIZE,
y * STEP: y * STEP + CROP_SIZE,
x * STEP: x * STEP + CROP_SIZE]
done_count += 1
if done_count % 10000 == 0:
logging.info("Done: ", done_count, " skipped:", skipped_count)
if cube_mask.sum() < 2000:
skipped_count += 1
continue
if CROP_SIZE != CUBE_SIZE:
cube_img = rescale_patient_images(cube_img, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))
# if you want to consider CROP_SIZE != CUBE_SIZE, see PR #147 for rescale_patient_images2 which
# rescales input images to support this case
batch_list_coords.append((z, y, x))
img_prep = prepare_image_for_net3D(cube_img)
batch_list.append(img_prep)
if len(batch_list) % BATCH_SIZE == 0:
batch_data = np.vstack(batch_list)
p = model.predict(batch_data, batch_size=BATCH_SIZE)
ppc, annotation_index = stats_from_batch(p, patient_img.shape, predict_volume,
batch_list_coords, annotation_index)
patient_predictions_csv.extend(ppc)
batch_list[:] = []
batch_list_coords[:] = []
if batch_list:
# flush the final partial batch so the last cubes are not silently dropped
batch_data = np.vstack(batch_list)
p = model.predict(batch_data, batch_size=BATCH_SIZE)
ppc, annotation_index = stats_from_batch(p, patient_img.shape, predict_volume,
batch_list_coords, annotation_index)
patient_predictions_csv.extend(ppc)
return patient_predictions_csv
def stats_from_batch(p, p_shape, predict_volume, batch_list_coords, annotation_index):
"""Return a list of DataFrame including position, diameter and chance of abnormal tissue to be a nodule
for each nodule in a batch.
Args:
p : an output from th 3D ConvNet, length of p[0] is equal to a batch size.
p_shape (list[int]): a | |
= thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ComponentHubSettings, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_component_hub_settings" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_component_hub_settings`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_component_hub_settings`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/hub/{name}/settings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ComponentHubSettings', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_component_version(self, owner, entity, name, **kwargs): # noqa: E501
"""Get component version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_version(owner, entity, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str name: Sub-entity name (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ComponentVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_component_version_with_http_info(owner, entity, name, **kwargs) # noqa: E501
def get_component_version_with_http_info(self, owner, entity, name, **kwargs): # noqa: E501
"""Get component version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_version_with_http_info(owner, entity, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str name: Sub-entity name (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ComponentVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'entity',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_component_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_component_version`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `get_component_version`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_component_version`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/hub/{entity}/versions/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ComponentVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
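# Hedged usage sketch for the two methods above (all values are placeholders; `api` stands
# for an already configured instance of the generated API class these methods belong to):
#
#   version = api.get_component_version("acme", "my-hub", "1.0.0")
#   version, status, headers = api.get_component_version_with_http_info(
#       "acme", "my-hub", "1.0.0", _request_timeout=(3, 10))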
def get_component_version_stages(self, owner, entity, name, **kwargs): # noqa: E501
"""Get component version stages # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_version_stages(owner, entity, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str name: Sub-entity name (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Stage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_component_version_stages_with_http_info(owner, entity, name, **kwargs) # noqa: E501
def get_component_version_stages_with_http_info(self, owner, entity, name, **kwargs): # noqa: E501
"""Get component version stages # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_version_stages_with_http_info(owner, entity, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str name: Sub-entity name (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Stage, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'entity',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_component_version_stages" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_component_version_stages`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `get_component_version_stages`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None):  # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_component_version_stages`")  # noqa: E501
# -*- coding: UTF-8 -*-
"""
Author:wistn
since:2020-09-11 18:44:30
LastEditors:Do not edit
LastEditTime:2021-04-13
Description:
"""
import asyncio
import re
import traceback
from .android_util_Log import Log
from .org_noear_sited_Util import Util
from .org_noear_sited_SdAttributeList import SdAttributeList
from .mytool import TextUtils
from .org_noear_sited_SdApi import SdApi
from .org_noear_sited_SdNodeSet import SdNodeSet
from .org_noear_sited_SdNode import SdNode
from .org_noear_sited_JsEngine import JsEngine
from .org_noear_sited_SdJscript import SdJscript
from .org_noear_sited___AsyncTag import __AsyncTag as AsyncTag
from .org_noear_sited_DataContext import DataContext
from .org_noear_sited_HttpMessage import HttpMessage
class SdSource:
def encode(self):
return self._encode
def ua(self):
if TextUtils.isEmpty(self._ua):
return Util.defUA
else:
return self._ua
def cookies(self):
return self._cookies
def setCookies(self, cookies):
self._cookies = cookies
def delCache(self, key):
Util.cache.delete(key)
# --------------------------------
async def __new__(cls, app, xml):
asyncInstance = object.__new__(cls)
await asyncInstance.__init__(app, xml)
return asyncInstance
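# The pair of methods above makes construction awaitable: `source = await SdSource(app, xml)`
# yields a fully loaded instance, because __new__ creates the object and then awaits the
# asynchronous __init__ (which in turn awaits doLoad).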
async def __init__(self, app, xml):
self.attrs = SdAttributeList()
self.schema = 0
self.isDebug = False  # whether debug mode is enabled
self.engine = 0  # engine version number
self.url_md5 = None
self.url = None  # source home page
self.title = None  # title
self.expr = None  # expression used to match this source
self._encode = None  # encoding
self._ua = None
self._cookies = None
self.head = None
self.body = None
self.js = None  # must not be treated as an attribute
self.script = None
self.root = None
self.xmlBodyName = None
self.xmlHeadName = None
self.xmlScriptName = None
if self.__class__ == SdSource:
self.doInit(app, xml)
self.xmlHeadName = "head"
self.xmlBodyName = "body"
self.xmlScriptName = "script"
if self.__class__ == SdSource:
await self.doLoad(app)
def doInit(self, app, xml):
Util.tryInitCache("app.getApplicationContext()")
self.root = Util.getXmlroot(xml)  # root is the root element, i.e. the line right after the ?xml declaration in the plugin
temp = self.root.attrib.items()
for [key, value] in temp:
self.attrs.set(key, value)  # store the element's attributes
temp = list(self.root)
for i in range(temp.__len__()):
p = temp[i]
if isinstance(p.tag, str) and p.attrib.items().__len__() == 0:
if list(p).__len__() == 0:
# p is an element of the form <title>xxx</title>
self.attrs.set(p.tag, p.text)
self.schema = self.attrs.getInt("schema")
self.engine = self.attrs.getInt("engine")
self.isDebug = self.attrs.getInt("debug") > 0
async def doLoad(self, app):
self.xmlHeadName = self.attrs.getString("head", self.xmlHeadName)
self.xmlBodyName = self.attrs.getString("body", self.xmlBodyName)
self.xmlScriptName = self.attrs.getString("script", self.xmlScriptName)
# 1.head
self.head = SdApi.createNodeSet(self, self.xmlHeadName)
# self.head = SdNodeSet(self)
# careful: circular require with SdNode
self.head.buildForNode(Util.getElement(self.root, self.xmlHeadName))
if self.schema >= 1:
self.head.attrs.addAll(self.attrs)
else:
self.head.attrs = self.attrs  # older versions have no head, so hand the current attributes over to head
# 2.body
self.body = SdApi.createNodeSet(self, self.xmlBodyName)
# self.body = SdNodeSet(self)  -- careful: circular require
self.body.buildForNode(Util.getElement(self.root, self.xmlBodyName))
self.title = self.head.attrs.getString("title")
self.expr = self.head.attrs.getString("expr")
self.url = self.head.attrs.getString("url")
self.url_md5 = Util.md5(self.url)
self._encode = self.head.attrs.getString("encode")
self._ua = self.head.attrs.getString("ua")
# ----------
# 3.script :: handled last
#
self.js = JsEngine(app, self)
self.script = SdJscript(self, Util.getElement(self.root, self.xmlScriptName))
await self.script.loadJs(app, self.js)
self.root = None
def DoCheck(self, url, cookies, isFromAuto):
return True
async def DoTraceUrl(self, url, args, config):
pass
#
# ------------
#
def isMatch(self, url):
pattern = re.compile(self.expr)
return pattern.search(url)
def loadJs(self, jsCode):
self.js.loadJs(jsCode)
def callJs(self, fun, attrs):
return self.js.callJs(fun, attrs)
# -------------
def parse(self, config, url, html):
Log.v("parse", url)
Log.v("parse", "None" if html == None else html)
temp = config.parse(url, html)
if temp == None:
Log.v("parse.rst", "None" + "\r\n\n")
else:
Log.v("parse.rst", temp + "\r\n\n")
return temp
def parseUrl(self, config, url, html):
Log.v("parseUrl", url)
Log.v("parseUrl", "None" if html == None else html)
temp = config.parseUrl(url, html)
if temp == None:
return ""
else:
return temp
#
# ---------------------------------------
#
async def getNodeViewModel(self, *arguments):
len = arguments.__len__()
if len == 4:
viewModel = arguments[0]
nodeSet = arguments[1]
isUpdate = arguments[2]
callback = arguments[3]  # home node
tag = AsyncTag()
dataContext = DataContext()
asyncTasks = []
for node in nodeSet.nodes():
n = node
asyncTasks.append(
asyncio.create_task(
self.doGetNodeViewModel2(
viewModel,
isUpdate,
tag,
n.url.value,
None,
n,
dataContext,
callback,
)
)
)
await asyncio.gather(*asyncTasks)  # Python event-loop model: run the home child-node coroutines concurrently; results come back through the shared callback.
if tag.total == 0:
await callback(1)
elif len == 6:
if type(arguments[3]) == int:
viewModel = arguments[0]
isUpdate = arguments[1]
key = arguments[2]
page = arguments[3]
config = arguments[4]
callback = arguments[5]  # search node
try:
tag = AsyncTag()
dataContext = DataContext()
await self.doGetNodeViewModel1(
viewModel,
isUpdate,
tag,
config.url.value,
key,
page,
config,
dataContext,
callback,
)
except Exception as ex:
await callback(1)
elif type(arguments[2]) == int:
viewModel = arguments[0]
isUpdate = arguments[1]
page = arguments[2]
url = arguments[3]
config = arguments[4]
callback = arguments[5]  # tag node
config.url.value = url
tag = AsyncTag()
dataContext = DataContext()
await self.doGetNodeViewModel1(
viewModel,
isUpdate,
tag,
url,
None,
page,
config,
dataContext,
callback,
)
elif isinstance(arguments[3], SdNode):
viewModel = arguments[0]
isUpdate = arguments[1]
url = arguments[2]
config = arguments[3]
args = arguments[4]
callback = arguments[5]  # book/section nodes
# the url needs converting to the latest format (an older cached format may exist)
try:
# if (self.DoCheck(url, self.cookies(), True) == False):
# callback(99)
# return
# Note for the Python port: this check is temporarily commented out, because for plugins with a login node whose login.check is 0, DoCheck('', cookies, False) evaluates to False and the cookie cannot be saved, so self.cookies() would later be None
tag = AsyncTag()
dataContext = DataContext()
await self.doGetNodeViewModel2(
viewModel,
isUpdate,
tag,
url,
args,
config,
dataContext,
callback,
)
if tag.total == 0:
await callback(1)
except Exception as ex:
print(traceback.format_exc())
await callback(1)
elif len == 5:
viewModel = arguments[0]
isUpdate = arguments[1]
url = arguments[2]
config = arguments[3]
callback = arguments[4]  # book/section nodes
await self.getNodeViewModel(
viewModel, isUpdate, url, config, None, callback
)
async def doGetNodeViewModel1(
self, viewModel, isUpdate, tag, url, key, page, config, dataContext, callback
):
# used by the search/tag/subtag nodes
asyncTasks_doGetNodeViewModel1 = []
msg = HttpMessage()
page += config.addPage  # apply the configured page offset
if key != None and TextUtils.isEmpty(config.addKey) == False:
# if a supplementary keyword is configured
key = key + " " + config.addKey
msg.url = config.getUrl(url, key, page)
if TextUtils.isEmpty(msg.url) and config.hasAdds() == False:
await callback(-3)
return
if TextUtils.isEmpty(msg.url) == False:
msg.rebuild(config)
if "post" == config.method:
msg.rebuildForm(page, key)
else:
msg.url = msg.url.replace("@page", str(page))
if key != None:
# this is a search node issued as a GET request; only it carries @key
msg.url = msg.url.replace("@key", Util.urlEncode(key, config))
pageX = page
keyX = key
async def HttpCallback(code, sender, text, url302):
asyncTasks = []
tag.value += 1
if code == 1:
if TextUtils.isEmpty(url302):
url302 = sender.url
if TextUtils.isEmpty(config.onParseUrl) == False:
# the url needs to be parsed out (multiple urls are separated by ';')
# when a tag node has parseUrl, run doParseUrl_Aft, which performs the parse step and returns the callback directly to this class's caller; otherwise run doParse_noAddin, which performs the parse step and then calls back to this class's caller from this method
newUrls = []
rstUrls = self.parseUrl(config, url302, text).split(";")
for url1 in rstUrls:
if url1.__len__() == 0:
continue
if url1.startswith(Util.NEXT_CALL):
SdApi.log(self, "CALL::url=", url1)
msg0 = HttpMessage()
msg0.url = (
url1.replace(Util.NEXT_CALL, "")
.replace("GET::", "")
.replace("POST::", "")
)
msg0.rebuild(config)
if url1.find("POST::") > 0:
msg0.method = "post"
msg0.rebuildForm(pageX, keyX)
else:
msg0.method = "get"
msg0.callback = msg.callback
tag.total += 1
asyncTasks.append(
asyncio.create_task(Util.http(self, isUpdate, msg0))
)
else:
newUrls.append(url1)
if newUrls.__len__() > 0:
asyncTasks.append(
asyncio.create_task(
self.doParseUrl_Aft(
viewModel,
config,
isUpdate,
newUrls,
sender.form,
tag,
dataContext,
callback,
)
)
)
await asyncio.gather(*asyncTasks)
if asyncTasks.__len__() == 0 and tag.total == tag.value:
await callback(-2)  # when the parseUrl function fails, the engine exits this way
return
else:
self.doParse_noAddin(viewModel, config, url302, text)
if tag.total == tag.value:
await callback(code)
msg.callback = HttpCallback
tag.total += 1
asyncTasks_doGetNodeViewModel1.append(
asyncio.create_task(Util.http(self, isUpdate, msg))
)
if config.hasAdds():
# 2.2 fetch the supplementary content (there may be several)
for n1 in config.adds():
if n1.isEmptyUrl():
continue
urlA = n1.url.getValue(url)
asyncTasks_doGetNodeViewModel1.append(
asyncio.create_task(
self.doGetNodeViewModel1(
viewModel,
isUpdate,
tag,
urlA,
key,
page,
n1,
dataContext,
callback,
)
)
)
await asyncio.gather(*asyncTasks_doGetNodeViewModel1)
async def doGetNodeViewModel2(
self, viewModel, isUpdate, tag, url, args, config, dataContext, callback
):
# used by the hots/updates/tags/book[1-7]/section nodes, whose args are all None, and by book[8], whose args are the input boxes described in the developer guide: {'input box id': 'the value matching the [book8] id'}. Not used by the search/tag/subtag nodes.
# the url needs converting to the latest format (an older cached format may exist)
asyncTasks_doGetNodeViewModel2 = []
if config.isEmpty():
return
if config.hasItems() and TextUtils.isEmpty(config.onParse):
viewModel.loadByConfig(config)
if "@null" == config.method:
url2 = config.getUrl(url, args)
if TextUtils.isEmpty(config.onParse):
viewModel.loadByJson(config, url2)
else:
viewModel.loadByJson(
config, self.parse(config, url2, Util.toJson(args))
)
return
if (
TextUtils.isEmpty(config.onParse) == False
and TextUtils.isEmpty(url) == False
):
# if there is neither a url nor a parse handler, do nothing
msg = HttpMessage()
# needed by doParseUrl_Aft (must live in the enclosing scope)
# Map<Integer, String> dataList = new HashMap<>();  # if there are several addresses they need ordering
# 2. fetch the main content
msg.url = config.getUrl(url, args)
# if cached, this may effectively run synchronously
msg.rebuild(config)
msg.rebuildForm(args)
async def HttpCallback(code, sender, text, url302):
asyncTasks = []
tag.value += 1
if code == 1:
if TextUtils.isEmpty(url302):
url302 = sender.url
if TextUtils.isEmpty(config.onParseUrl) == False:
# when a hots/updates/tags node has parseUrl, run doParseUrl_Aft, which performs the parse step and returns the callback directly to this class's caller; otherwise run doParse_hasAddin, which performs the parse step and then calls back to this class's caller from this method
# the url needs to be parsed out (multiple urls are separated by ';')
newUrls = []
rstUrls = self.parseUrl(config, url302, text).split(";")
for url1 in rstUrls:
if url1.__len__() == 0:
continue
if url1.startswith(Util.NEXT_CALL):
SdApi.log(self, "CALL::url=", url1)
msg0 = HttpMessage()
msg0.url = (
url1.replace(Util.NEXT_CALL, "")
.replace("GET::", "")
.replace("POST::", "")
)
msg0.rebuild(config)
if url1.find("POST::") > 0:
msg0.method = "post"
msg0.rebuildForm(args)
else:
msg0.method = "get"
msg0.callback = msg.callback
tag.total += 1
asyncTasks.append(
asyncio.create_task(Util.http(self, isUpdate, msg0))
)
else:
newUrls.append(url1)
if newUrls.__len__() > 0:
asyncTasks.append(
asyncio.create_task(
self.doParseUrl_Aft(
viewModel,
config,
isUpdate,
newUrls,
args,
tag,
dataContext,
callback,
)
)
)
await asyncio.gather(*asyncTasks)
if asyncTasks.__len__() == 0 and tag.total == tag.value:
await callback(-2)  # when the parseUrl function fails, the engine exits this way
return  # the code below is skipped
else:
self.doParse_hasAddin(viewModel, config, url302, text)
if tag.total == tag.value:
await callback(code)
msg.callback = HttpCallback
tag.total += 1
asyncTasks_doGetNodeViewModel2.append(
asyncio.create_task(Util.http(self, isUpdate, msg))
)
if config.hasAdds():
# 2.2 fetch the supplementary content (there may be several)
for n1 in config.adds():
if n1.isEmptyUrl():
continue
urlA = n1.url.getValue(url)
asyncTasks_doGetNodeViewModel2.append(
asyncio.create_task(
self.doGetNodeViewModel2(
viewModel,
isUpdate,
tag,
urlA,
args,
n1,
dataContext,
callback,
)
)
)
await asyncio.gather(*asyncTasks_doGetNodeViewModel2)
async def doParseUrl_Aft(
self, viewModel, config, isUpdate, newUrls, args, tag, dataContext, callback
):
asyncTasks = []
# tag.num += newUrls.__len__()
for newUrl2 in newUrls:
async def asyncLoopGetI(newUrl2):
import urllib.request as request
import json
import os
import random
import hashlib
import requests
from pairio import client as pairio
from shutil import copyfile
from .steady_download_and_compute_sha1 import steady_download_and_compute_sha1
from datetime import datetime as dt
class KBucketClient():
def __init__(self):
self._config=dict(
share_ids=[], # remote kbucket shares to search for files
url=os.getenv('KBUCKET_URL','https://kbucket.flatironinstitute.org'), # the kbucket hub url
upload_share_id=None,
upload_token=None,
local_cache_dir=os.getenv('KBUCKET_CACHE_DIR','/tmp/sha1-cache'),
load_local=True,
load_remote=True,
save_remote=True
)
self._sha1_cache=Sha1Cache()
self._sha1_cache.setDirectory(self._config['local_cache_dir'])
self._nodeinfo_cache={}
self._verbose=False
def setConfig(self,*,
share_ids=None,
url=None,
upload_share_id=None,
upload_token=None,
local_cache_dir=None,
load_local=None,
load_remote=None, save_remote=None,
verbose=None
):
if share_ids is not None:
if type(share_ids)!=list:
raise Exception('share_ids must be a list')
self._config['share_ids']=share_ids
if url is not None:
self._config['url']=url
if upload_share_id:
if not upload_token:
raise Exception('Cannot set upload_share_id without upload token')
self._config['upload_share_id']=upload_share_id
if upload_token is not None:
self._config['upload_token']=upload_token
if local_cache_dir is not None:
self._config['local_cache_dir']=local_cache_dir
self._sha1_cache.setDirectory(self._config['local_cache_dir'])
if load_local is not None:
self._config['load_local']=load_local
if load_remote is not None:
self._config['load_remote']=load_remote
if save_remote is not None:
self._config['save_remote']=save_remote
if verbose is not None:
self._verbose=verbose
def getConfig(self):
ret=self._config.copy()
if ret['upload_token']:
ret['upload_token']=None
return ret
def testSaveRemote(self):
if not self._config['upload_share_id']:
raise Exception('Cannot test upload. Share id has not been set.')
print ('Testing upload to: '+self._config['upload_share_id'])
try:
self.saveObject({'test':'upload'},key={'test':'upload'},remote=True)
except:
raise Exception('Upload failed.')
print ('Test upload successful.')
def findFile(self,path=None,*,sha1=None,share_ids=None,key=None,collection=None,local=None,remote=None):
path, sha1, size = self._find_file_helper(path=path,sha1=sha1,share_ids=share_ids,key=key,collection=collection,local=local,remote=remote)
return path
def realizeFile(self,path=None,*,sha1=None,share_ids=None,target_path=None,key=None,collection=None,local=None,remote=None,verbose=True):
path, sha1, size = self._find_file_helper(path=path,sha1=sha1,share_ids=share_ids,key=key,collection=collection,local=local,remote=remote)
if not path:
return None
if not _is_url(path):
if target_path is not None:
if target_path==path:
return path
else:
copyfile(path,target_path)
return path
else:
return path
return self._sha1_cache.downloadFile(url=path,sha1=sha1,target_path=target_path,size=size,verbose=verbose)
def getFileSize(self, path=None,*,sha1=None,share_ids=None,key=None,collection=None,local=None,remote=None):
path, sha1, size = self._find_file_helper(path=path,sha1=sha1,share_ids=share_ids,key=key,collection=collection,local=local,remote=remote)
return size
def moveFileToCache(self,path):
return self._sha1_cache.moveFileToCache(path)
def copyFileToCache(self,path):
return self._sha1_cache.copyFileToCache(path)
def readDir(self,path,recursive=False,include_sha1=True):
if path.startswith('kbucket://'):
list=path.split('/')
share_id=_filter_share_id(list[2])
path0='/'.join(list[3:])
ret=self._read_kbucket_dir(share_id=share_id,path=path0,recursive=recursive,include_sha1=include_sha1)
else:
ret=self._read_file_system_dir(path=path,recursive=recursive,include_sha1=include_sha1)
return ret
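# Hedged usage sketch for readDir (the share id and directories below are placeholders):
#
#   kb = KBucketClient()
#   local_listing = kb.readDir('/tmp/some-dir', recursive=False, include_sha1=False)
#   remote_listing = kb.readDir('kbucket://<share_id>/some/dir', include_sha1=True)
#
# Both calls return a dict of the form {'files': {...}, 'dirs': {...}}.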
def pairioClient(self):
return pairio
def _read_file_system_dir(self,*,path,recursive,include_sha1):
ret=dict(
files={},
dirs={}
)
list=_safe_list_dir(path)
for name0 in list:
path0=path+'/'+name0
if os.path.isfile(path0):
ret['files'][name0]=dict(
size=os.path.getsize(path0)
)
if include_sha1:
ret['files'][name0]['sha1']=self.computeFileSha1(path0)
elif os.path.isdir(path0):
ret['dirs'][name0]={}
if recursive:
ret['dirs'][name0]=self._read_file_system_dir(path=path0,recursive=recursive,include_sha1=include_sha1)
return ret
def _read_kbucket_dir(self,*,share_id,path,recursive,include_sha1):
url=self._config['url']+'/'+share_id+'/api/readdir/'+path
obj=self._http_get_json(url)
if not obj['success']:
return None
ret=dict(
files={},
dirs={}
)
for file0 in obj['files']:
name0=file0['name']
ret['files'][name0]=dict(
size=file0['size']
)
if include_sha1:
if 'prv' in file0:
ret['files'][name0]['sha1']=file0['prv']['original_checksum']
for dir0 in obj['dirs']:
name0=dir0['name']
ret['dirs'][name0]={}
if recursive:
ret['dirs'][name0]=self._read_kbucket_dir(share_id=share_id,path=path+'/'+name0,recursive=recursive,include_sha1=include_sha1)
return ret
def computeFileSha1(self,path):
if path.startswith('sha1://'):
list=path.split('/')
sha1=list[2]
return sha1
elif path.startswith('kbucket://'):
path, sha1, size = self._find_file_helper(path=path)
return sha1
else:
return self._sha1_cache.computeFileSha1(path)
def computeDirHash(self,path):
dd=self.readDir(path=path,recursive=True,include_sha1=True)
return _sha1_of_object(dd)
def _save_file_helper(self,path,share_id=None,upload_token=None,basename=None,remote=None):
if remote is None:
remote=self._config['save_remote']
if not share_id:
share_id=self._config['upload_share_id']
if share_id:
share_id=_filter_share_id(share_id)
if basename is None:
basename=os.path.basename(path)
if not upload_token:
upload_token=self._config['upload_token']
if (remote) and (share_id) and (not upload_token):
raise Exception('Upload token not set for share_id='+share_id)
path=self.realizeFile(path)
if not path:
raise Exception('Unable to realize file for upload.')
sha1=self.computeFileSha1(path)
ret_path='sha1://{}/{}'.format(sha1,basename)
self.copyFileToCache(path)
if (not remote) or (not share_id):
# share_id not set... not uploading.
return ret_path
url00,size00=self._find_in_share(sha1=sha1,share_id=share_id)
if url00:
print ('Already on server.')
return ret_path
server_url=self._get_cas_upload_url_for_share(share_id=share_id)
url_check_path0='/check/'+sha1
signature=_sha1_of_object({'path':url_check_path0,'token':upload_token})
url_check=server_url+url_check_path0+'?signature='+signature+'&size={}'.format(os.path.getsize(path))
resp_obj=self._http_get_json(url_check)
if not resp_obj['success']:
raise Exception('Problem checking for upload: '+resp_obj['error'])
if not resp_obj['okay_to_upload']:
print ('Cannot upload: '+resp_obj['message'])
return
if not resp_obj['found']:
url_path0='/upload/'+sha1
signature=_sha1_of_object({'path':url_path0,'token':upload_token})
url=server_url+url_path0+'?signature='+signature
resp_obj=_http_post_file_data(url,path)
if not resp_obj['success']:
raise Exception('Problem posting file data: '+resp_obj['error'])
else:
print ('Already on server (*)')
return ret_path
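# Illustrative sketch (not part of the original client): how the signed
# check/upload URLs used above are assembled. The server URL, sha1 and token
# below are hypothetical placeholder values.
import hashlib, json
def _example_signature(path, token):
    # same canonical-JSON sha1 that _sha1_of_object (defined below) produces
    txt = json.dumps({'path': path, 'token': token}, sort_keys=True, separators=(',', ':'))
    return hashlib.sha1(txt.encode('utf-8')).hexdigest()
_example_server = 'https://example-node/cas-upload'    # hypothetical
_example_sha1 = 'a' * 40                               # hypothetical
_example_token = 'secret-upload-token'                 # hypothetical
_example_check_url = _example_server + '/check/' + _example_sha1 \
    + '?signature=' + _example_signature('/check/' + _example_sha1, _example_token) \
    + '&size=12345'
_example_upload_url = _example_server + '/upload/' + _example_sha1 \
    + '?signature=' + _example_signature('/upload/' + _example_sha1, _example_token)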
def saveFile(self,fname,*,key=None,share_id=None,upload_token=None,basename=None,remote=None):
ret=self._save_file_helper(fname,share_id=share_id,upload_token=upload_token,basename=basename,remote=remote)
if key:
sha1=self.computeFileSha1(fname)
pairio.set(key,sha1)
return ret
def saveObject(self,object,*,key,format='json',share_id=None,upload_token=None,remote=None):
tmp_fname=self._create_temporary_file_for_object(object=object,format=format)
try:
fname=self.moveFileToCache(tmp_fname)
except:
_safe_remove_file(tmp_fname)
raise
self.saveFile(fname,share_id=share_id,upload_token=upload_token,key=key,basename='object.json',remote=remote)
def loadObject(self,*,format='json',share_ids=None,key=None,collection=None,local=None,remote=None):
fname=self.realizeFile(share_ids=share_ids,key=key,collection=collection,local=local,remote=remote)
if fname is None:
raise Exception('Unable to find file.')
if format=='json':
ret=_read_json_file(fname)
if not ret:
raise Exception('Unable to read or parse json file: '+fname)
else:
raise Exception('Unsupported format in loadObject: '+format)
return ret
def getTemporaryFileName(self,fname):
return self._create_temporary_fname(fname)
def _create_temporary_file_for_object(self,*,object,format):
tmp_fname=self._create_temporary_fname('object.json')
if format=='json':
_write_json_file(object,tmp_fname)
else:
raise Exception('Unsupported format in saveObject: '+format)
return tmp_fname
def _create_temporary_fname(self,fname):
return self._config['local_cache_dir']+'/tmp_kbucketclient_'+''.join(random.choices('abcdefghijklmnopqrstuvwxyz', k=10))+'.'+fname
def getNodeInfo(self,share_id):
if share_id in self._nodeinfo_cache:
return self._nodeinfo_cache[share_id]
share_id=_filter_share_id(share_id)
url=self._config['url']+'/'+share_id+'/api/nodeinfo'
ret=self._http_get_json(url)
if ret:
self._nodeinfo_cache[share_id]=ret
return ret
def _find_file_helper(self,*,path,sha1=None,share_ids=None,key=None,collection=None,local=None,remote=None):
if local is None:
local=self._config['load_local']
if remote is None:
remote=self._config['load_remote']
if share_ids is None:
share_ids=self._config['share_ids']
if key is not None:
sha1=pairio.get(key=key,collection=collection)
if not sha1:
return (None,None,None)
if path is not None:
if sha1 is not None:
raise Exception('Cannot specify both path and sha1 in find file')
if path.startswith('sha1://'):
list=path.split('/')
sha1=list[2]
### continue to below
elif path.startswith('kbucket://'):
list=path.split('/')
share_ids=[_filter_share_id(list[2])]
path0='/'.join(list[3:])
prv=self._get_prv_for_file(share_id=share_ids[0],path=path0)
if not prv:
return (None, None, None)
sha1=prv['original_checksum']
remote=True
### continue to below
else:
if os.path.exists(path) and os.path.isfile(path):
return (path, None, os.path.getsize(path))
else:
return (None, None, None)
# search locally
if local:
path=self._sha1_cache.findFile(sha1=sha1)
else:
path=''
if path:
return (path,sha1,os.path.getsize(path))
if remote:
for id in share_ids:
url,size=self._find_in_share(sha1=sha1,share_id=id)
if url:
return (url,sha1,size)
return (None,None,None)
def _get_prv_for_file(self,*,share_id,path):
url=self._config['url']+'/'+share_id+'/prv/'+path
try:
obj=self._http_get_json(url)
except:
return None
return obj
def _find_in_share(self,*,sha1,share_id):
share_id=_filter_share_id(share_id)
url=self._config['url']+'/'+share_id+'/api/find/'+sha1
obj=self._http_get_json(url)
if not obj['success']:
raise Exception('Error finding file in share: '+obj['error'])
if not obj['found']:
return (None,None)
urls0=obj['urls']
results0=obj['results']
for timeout in [0.5,2]: ## probably not a good idea
for url0 in urls0:
if _test_url_accessible(url0,timeout=timeout):
size0=results0[0]['size']
return (url0,size0)
return (None,None)
def _get_cas_upload_url_for_share(self,share_id):
node_info=self.getNodeInfo(share_id)
if not node_info:
raise Exception('Unable to get node info for share: '+share_id)
if not 'info' in node_info:
raise Exception('node_info does not have info field for share: '+share_id)
if not 'cas_upload_url' in node_info['info']:
raise Exception('node_info does not have info.cas_upload_url field for share: '+share_id)
return node_info['info'].get('cas_upload_url',None)
def _http_get_json(self,url):
return _http_get_json(url,verbose=self._verbose)
def _http_get_json(url,verbose=False):
timer=dt.now()
if verbose:
print ('_http_get_json::: '+url)
try:
req=request.urlopen(url)
except:
raise Exception('Unable to open url: '+url)
try:
ret=json.load(req)
except:
raise Exception('Unable to load json from url: '+url)
if verbose:
print ('done.')
return ret
def _http_post_file_data(url,fname):
with open(fname, 'rb') as f:
try:
obj=requests.post(url, data=f)
except:
raise Exception('Error posting file data.')
if obj.status_code!=200:
raise Exception('Error posting file data: {} {}'.format(obj.status_code,obj.content.decode('utf-8')))
return json.loads(obj.content)
def _test_url_accessible(url,timeout):
try:
req = request.Request(url, method="HEAD")
code=request.urlopen(req,timeout=timeout).getcode()
return (code==200)
except:
return False
def _is_url(path):
return ((path.startswith('http://')) or (path.startswith('https://')))
_filter_share_id_cache={}
def _filter_share_id(id):
if id in _filter_share_id_cache:
return _filter_share_id_cache[id]
if '.' in id:
list=id.split('.')
if len(list)!=2:
return id
ret=pairio.get(list[1],collection=list[0])
if ret:
_filter_share_id_cache[id]=ret
return ret
else:
return id
def _safe_list_dir(path):
try:
ret=os.listdir(path)
return ret
except:
print ('Warning: unable to listdir: '+path)
return []
# TODO: implement cleanup() for Sha1Cache
# removing .record.json and .hints.json files that are no longer relevant
class Sha1Cache():
def __init__(self):
self._directory=''
def setDirectory(self,directory):
if not os.path.exists(directory):
os.mkdir(directory)
self._directory=directory
def findFile(self,sha1):
path=self._get_path(sha1,create=False)
if os.path.exists(path):
return path
hints_fname=path+'.hints.json'
if os.path.exists(hints_fname):
hints=_read_json_file(hints_fname)
if hints and ('files' in hints):
files=hints['files']
matching_files=[]
for file in files:
path0=file['stat']['path']
if os.path.exists(path0) and os.path.isfile(path0):
stat_obj0=_get_stat_object(path0)
if stat_obj0:
if (_stat_objects_match(stat_obj0,file['stat'])):
to_return=path0
matching_files.append(file)
if len(matching_files)>0:
hints['files']=matching_files
try:
_write_json_file(hints,hints_fname)
except:
print ('Warning: problem writing hints file: '+hints_fname)
return matching_files[0]['stat']['path']
else:
_safe_remove_file(hints_fname)
else:
print ('Warning: failed to load hints json file, or invalid file. Removing: '+hints_fname)
_safe_remove_file(hints_fname)
def downloadFile(self,url,sha1,target_path=None,size=None,verbose=True):
alternate_target_path=False
if target_path is None:
target_path=self._get_path(sha1,create=True)
else:
alternate_target_path=True
path_tmp=target_path+'.downloading'
size_mb='unknown'
if size:
size_mb=int(size/(1024*1024)*10)/10
if verbose:
print ('Downloading file --- ({} MB): {} -> {}'.format(size_mb,url,target_path))
sha1b=steady_download_and_compute_sha1(url=url,target_path=path_tmp)
if not sha1b:
if os.path.exists(path_tmp):
_safe_remove_file(path_tmp)
if sha1!=sha1b:
if os.path.exists(path_tmp):
_safe_remove_file(path_tmp)
raise Exception('sha1 of downloaded file does not match expected {} {}'.format(url,sha1))
if os.path.exists(target_path):
_safe_remove_file(target_path)
os.rename(path_tmp,target_path)
if alternate_target_path:
self.computeFileSha1(target_path,_known_sha1=sha1)
return target_path
def moveFileToCache(self,path):
sha1=self.computeFileSha1(path)
path0=self._get_path(sha1,create=True)
if os.path.exists(path0):
if path!=path0:
_safe_remove_file(path)
else:
os.rename(path,path0)
return path0
def copyFileToCache(self,path):
sha1=self.computeFileSha1(path)
path0=self._get_path(sha1,create=True)
if not os.path.exists(path0):
copyfile(path,path0+'.copying')
os.rename(path0+'.copying',path0)
return path0
def computeFileSha1(self,path,_known_sha1=None):
aa=_get_stat_object(path)
aa_hash=_compute_string_sha1(json.dumps(aa, sort_keys=True))
path0=self._get_path(aa_hash,create=True)+'.record.json'
if os.path.exists(path0):
obj=_read_json_file(path0)
if obj:
bb=obj['stat']
if _stat_objects_match(aa,bb):
if obj.get('sha1',None):
return obj['sha1']
if _known_sha1 is None:
sha1=_compute_file_sha1(path)
else:
sha1=_known_sha1
if not sha1:
return None
obj=dict(
sha1=sha1,
stat=aa
)
try:
_write_json_file(obj,path0)
except:
print ('Warning: problem writing .record.json file: '+path0)
path1=self._get_path(sha1,create=True,directory=self._directory)+'.hints.json'
if os.path.exists(path1):
hints=_read_json_file(path1)
else:
hints=None
if not hints:
hints={'files':[]}
hints['files'].append(obj)
try:
_write_json_file(hints,path1)
except:
print ('Warning: problem writing .hints.json file: '+path1)
## todo: use hints for findFile
return sha1
def _get_path(self,sha1,*,create=True,directory=None):
if directory is None:
directory=self._directory
path0=directory+'/{}/{}{}'.format(sha1[0],sha1[1],sha1[2])
if create:
if not os.path.exists(path0):
os.makedirs(path0)
return path0+'/'+sha1
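# Illustrative sketch (not part of the original class): the on-disk layout
# produced by _get_path above. The directory and sha1 values are hypothetical.
_example_sha1 = '4f2de1b0c9a87e65d3c21b0a9f8e7d6c5b4a3f2e'
_example_dir = '/tmp/sha1-cache'
_example_path = _example_dir + '/{}/{}{}/{}'.format(
    _example_sha1[0], _example_sha1[1], _example_sha1[2], _example_sha1)
# -> '/tmp/sha1-cache/4/f2/4f2de1b0c9a87e65d3c21b0a9f8e7d6c5b4a3f2e'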
#def _download_and_compute_sha1(self,url,path):
# hh = hashlib.sha1()
# response=requests.get(url,stream=True)
# path_tmp=path+'.'+_random_string(6)
# with open(path_tmp,'wb') as f:
# for chunk in response.iter_content(chunk_size=512):
# if chunk: # filter out keep-alive new chunks
# hh.update(chunk)
# f.write(chunk)
# os.rename(path_tmp,path)
# return hh.hexdigest()
def _compute_file_sha1(path):
if (os.path.getsize(path)>1024*1024*100):
print ('Computing sha1 of {}'.format(path))
BLOCKSIZE = 65536
sha = hashlib.sha1()
with open(path, 'rb') as file:
buf = file.read(BLOCKSIZE)
while len(buf) > 0:
sha.update(buf)
buf = file.read(BLOCKSIZE)
return sha.hexdigest()
def _get_stat_object(fname):
try:
stat0=os.stat(fname)
obj=dict(
path=fname,
size=stat0.st_size,
ino=stat0.st_ino,
mtime=stat0.st_mtime,
ctime=stat0.st_ctime
)
return obj
except:
return None
def _stat_objects_match(aa,bb):
str1=json.dumps(aa, sort_keys=True)
str2=json.dumps(bb, sort_keys=True)
return (str1==str2)
def _compute_string_sha1(txt):
hash_object = hashlib.sha1(txt.encode('utf-8'))
return hash_object.hexdigest()
def _sha1_of_object(obj):
txt=json.dumps(obj, sort_keys=True, separators=(',', ':'))
return _compute_string_sha1(txt)
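# Illustrative check (not part of the original module): _sha1_of_object hashes
# a canonical JSON serialization (sorted keys, compact separators), so key
# order does not change the digest.
_example_digest_a = _sha1_of_object({'a': 1, 'b': 2})
_example_digest_b = _sha1_of_object({'b': 2, 'a': 1})
assert _example_digest_a == _example_digest_b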
def _safe_remove_file(fname):
try:
os.remove(fname)
except:
print ('Warning: unable to remove file that we thought existed: '+fname)
def _read_json_file(path):
| |
pulumi.get(self, "fqdn")
@fqdn.setter
def fqdn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fqdn", value)
@property
@pulumi.getter(name="shardName")
def shard_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the shard to which the host belongs.
"""
return pulumi.get(self, "shard_name")
@shard_name.setter
def shard_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shard_name", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class MdbClickhouseClusterMaintenanceWindowArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
day: Optional[pulumi.Input[str]] = None,
hour: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] type: Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
:param pulumi.Input[str] day: Day of week for maintenance window if window type is weekly. Possible values: `MON`, `TUE`, `WED`, `THU`, `FRI`, `SAT`, `SUN`.
:param pulumi.Input[int] hour: Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
"""
pulumi.set(__self__, "type", type)
if day is not None:
pulumi.set(__self__, "day", day)
if hour is not None:
pulumi.set(__self__, "hour", hour)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def day(self) -> Optional[pulumi.Input[str]]:
"""
Day of week for maintenance window if window type is weekly. Possible values: `MON`, `TUE`, `WED`, `THU`, `FRI`, `SAT`, `SUN`.
"""
return pulumi.get(self, "day")
@day.setter
def day(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "day", value)
@property
@pulumi.getter
def hour(self) -> Optional[pulumi.Input[int]]:
"""
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
"""
return pulumi.get(self, "hour")
@hour.setter
def hour(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "hour", value)
@pulumi.input_type
class MdbClickhouseClusterMlModelArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
type: pulumi.Input[str],
uri: pulumi.Input[str]):
"""
:param pulumi.Input[str] name: The name of the ML model.
:param pulumi.Input[str] type: Type of the ML model.
:param pulumi.Input[str] uri: Model file URL. You can only use models stored in Yandex Object Storage.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the ML model.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of the ML model.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def uri(self) -> pulumi.Input[str]:
"""
Model file URL. You can only use models stored in Yandex Object Storage.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: pulumi.Input[str]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class MdbClickhouseClusterShardGroupArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
shard_names: pulumi.Input[Sequence[pulumi.Input[str]]],
description: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the shard group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] shard_names: List of shard names that belong to the shard group.
:param pulumi.Input[str] description: Description of the shard group.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "shard_names", shard_names)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the shard group.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="shardNames")
def shard_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of shard names that belong to the shard group.
"""
return pulumi.get(self, "shard_names")
@shard_names.setter
def shard_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "shard_names", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the shard group.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class MdbClickhouseClusterUserArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
password: pulumi.Input[str],
permissions: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserPermissionArgs']]]] = None,
quotas: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserQuotaArgs']]]] = None,
settings: Optional[pulumi.Input['MdbClickhouseClusterUserSettingsArgs']] = None):
"""
:param pulumi.Input[str] name: The name of the user.
:param pulumi.Input[str] password: <PASSWORD>.
:param pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserPermissionArgs']]] permissions: Set of permissions granted to the user. The structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserQuotaArgs']]] quotas: Set of user quotas. The structure is documented below.
:param pulumi.Input['MdbClickhouseClusterUserSettingsArgs'] settings: Kafka connection settings, same as the `kafka` block.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "password", password)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if quotas is not None:
pulumi.set(__self__, "quotas", quotas)
if settings is not None:
pulumi.set(__self__, "settings", settings)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the user.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
The password of the user.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserPermissionArgs']]]]:
"""
Set of permissions granted to the user. The structure is documented below.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserPermissionArgs']]]]):
pulumi.set(self, "permissions", value)
@property
@pulumi.getter
def quotas(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserQuotaArgs']]]]:
"""
Set of user quotas. The structure is documented below.
"""
return pulumi.get(self, "quotas")
@quotas.setter
def quotas(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterUserQuotaArgs']]]]):
pulumi.set(self, "quotas", value)
@property
@pulumi.getter
def settings(self) -> Optional[pulumi.Input['MdbClickhouseClusterUserSettingsArgs']]:
"""
Kafka connection settings, same as the `kafka` block.
"""
return pulumi.get(self, "settings")
@settings.setter
def settings(self, value: Optional[pulumi.Input['MdbClickhouseClusterUserSettingsArgs']]):
pulumi.set(self, "settings", value)
@pulumi.input_type
class MdbClickhouseClusterUserPermissionArgs:
def __init__(__self__, *,
database_name: pulumi.Input[str]):
"""
:param pulumi.Input[str] database_name: The name of the database that the permission grants access to.
"""
pulumi.set(__self__, "database_name", database_name)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Input[str]:
"""
The name of the database that the permission grants access to.
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "database_name", value)
@pulumi.input_type
class MdbClickhouseClusterUserQuotaArgs:
def __init__(__self__, *,
interval_duration: pulumi.Input[int],
errors: Optional[pulumi.Input[int]] = None,
execution_time: Optional[pulumi.Input[int]] = None,
queries: Optional[pulumi.Input[int]] = None,
read_rows: Optional[pulumi.Input[int]] = None,
result_rows: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] interval_duration: Duration of interval for quota in milliseconds.
:param pulumi.Input[int] errors: The number of queries that threw exception.
:param pulumi.Input[int] execution_time: The total query execution time, in milliseconds (wall time).
:param pulumi.Input[int] queries: The total number of queries.
:param pulumi.Input[int] read_rows: The total number of source rows read from tables for running the query, on all remote servers.
:param pulumi.Input[int] result_rows: The total number of rows given as the result.
"""
pulumi.set(__self__, "interval_duration", interval_duration)
if errors is not None:
pulumi.set(__self__, "errors", errors)
if execution_time is not None:
pulumi.set(__self__, "execution_time", execution_time)
if queries is not None:
pulumi.set(__self__, "queries", queries)
if read_rows is not None:
pulumi.set(__self__, "read_rows", read_rows)
if result_rows is not None:
pulumi.set(__self__, "result_rows", result_rows)
@property
@pulumi.getter(name="intervalDuration")
def interval_duration(self) -> pulumi.Input[int]:
"""
Duration of interval for quota in milliseconds.
"""
return pulumi.get(self, "interval_duration")
@interval_duration.setter
def interval_duration(self, value: pulumi.Input[int]):
pulumi.set(self, "interval_duration", value)
@property
@pulumi.getter
def errors(self) -> Optional[pulumi.Input[int]]:
"""
The number of queries that threw exception.
"""
return pulumi.get(self, "errors")
@errors.setter
def errors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "errors", value)
@property
@pulumi.getter(name="executionTime")
def execution_time(self) -> Optional[pulumi.Input[int]]:
"""
The total query execution time, in milliseconds (wall time).
"""
return pulumi.get(self, "execution_time")
@execution_time.setter
def execution_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "execution_time", value)
@property
@pulumi.getter
def queries(self) -> Optional[pulumi.Input[int]]:
"""
The total number of queries.
"""
return pulumi.get(self, "queries")
@queries.setter
def queries(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "queries", value)
@property
@pulumi.getter(name="readRows")
def read_rows(self) -> Optional[pulumi.Input[int]]:
"""
The total number of source rows read from tables for running the query, on all remote servers.
"""
return pulumi.get(self, "read_rows")
@read_rows.setter
def read_rows(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "read_rows", value)
@property
@pulumi.getter(name="resultRows")
def result_rows(self) -> Optional[pulumi.Input[int]]:
"""
The total number of rows given as the result.
"""
return pulumi.get(self, "result_rows")
@result_rows.setter
def result_rows(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "result_rows", value)
@pulumi.input_type
class MdbClickhouseClusterUserSettingsArgs:
def __init__(__self__, *,
add_http_cors_header: Optional[pulumi.Input[bool]] = None,
allow_ddl: Optional[pulumi.Input[bool]] = None,
compile: Optional[pulumi.Input[bool]] = None,
compile_expressions: Optional[pulumi.Input[bool]] = None,
connect_timeout: Optional[pulumi.Input[int]] = None,
count_distinct_implementation: Optional[pulumi.Input[str]] = None,
distinct_overflow_mode: Optional[pulumi.Input[str]] = None,
distributed_aggregation_memory_efficient: Optional[pulumi.Input[bool]] = None,
distributed_ddl_task_timeout: Optional[pulumi.Input[int]] = None,
distributed_product_mode: Optional[pulumi.Input[str]] = None,
empty_result_for_aggregation_by_empty_set: Optional[pulumi.Input[bool]] = None,
enable_http_compression: Optional[pulumi.Input[bool]] = None,
fallback_to_stale_replicas_for_distributed_queries: Optional[pulumi.Input[bool]] = None,
force_index_by_date: Optional[pulumi.Input[bool]] = None,
force_primary_key: Optional[pulumi.Input[bool]] = None,
group_by_overflow_mode: Optional[pulumi.Input[str]] = None,
group_by_two_level_threshold: Optional[pulumi.Input[int]] = None,
group_by_two_level_threshold_bytes: Optional[pulumi.Input[int]] = None,
http_connection_timeout: Optional[pulumi.Input[int]] = None,
http_headers_progress_interval: Optional[pulumi.Input[int]] = None,
| |
<filename>sklearn/discriminant_analysis.py
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: <NAME>
# <NAME>
# <NAME>
# <NAME>
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or float type')
return s
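# Illustrative usage sketch (not part of scikit-learn): the three accepted
# forms of the shrinkage argument of _cov, applied to random data.
_example_X = np.random.RandomState(0).randn(50, 5)
_example_cov_empirical = _cov(_example_X)                # no shrinkage
_example_cov_auto = _cov(_example_X, shrinkage='auto')   # Ledoit-Wolf shrinkage
_example_cov_fixed = _cov(_example_X, shrinkage=0.1)     # fixed shrinkage in [0, 1]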
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
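# Illustrative worked example (not part of scikit-learn): _class_means on a
# tiny dataset. np.add.at accumulates the per-class sums, then dividing by the
# class counts gives the means: class 0 -> [1, 1], class 1 -> [10, 10].
_example_means = _class_means(np.array([[0., 0.], [2., 2.], [10., 10.]]),
                              np.array([0, 0, 1]))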
def _class_cov(X, y, priors, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage))
return cov
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False), used
only in 'svd' solver.
.. versionadded:: 0.17
tol : float, optional, (default 1.0e-4)
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
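# Illustrative note (not part of scikit-learn): the rule produced by
# _solve_lsqr is the standard LDA discriminant
#     score_k(x) = x @ (Sigma^-1 @ mu_k) - 0.5 * mu_k @ Sigma^-1 @ mu_k + log(prior_k)
# so each row of coef_ is Sigma^-1 @ mu_k and intercept_ holds the remaining
# terms. For a fitted multi-class estimator the class scores could be checked
# roughly as (clf and X are hypothetical names for a fitted model and data):
#     scores = X @ clf.coef_.T + clf.intercept_
#     predictions = clf.classes_[np.argmax(scores, axis=1)]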
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
| |
= Sound.multitone_masker()
sig = sig.ramp()
sig.spectrum()
"""
if samplerate is None:
samplerate = slab.signal._default_samplerate
duration = Sound.in_samples(duration, samplerate)
erb_freqs, _, _ = Filter._center_freqs( # get center_freqs
low_cutoff=low_cutoff, high_cutoff=high_cutoff, bandwidth=bandwidth)
freqs = slab.Filter._erb2freq(erb_freqs)
rand_phases = numpy.random.rand(len(freqs)) * 2 * numpy.pi
sig = Sound.tone(frequency=freqs, duration=duration,
phase=rand_phases, samplerate=samplerate)
data = numpy.sum(sig.data, axis=1) / len(freqs) # collapse across channels
out = Sound(data, samplerate=samplerate)
out.level = level
return out
@staticmethod
def equally_masking_noise(duration=1.0, low_cutoff=125, high_cutoff=4000, samplerate=None, level=None):
"""
Generate an equally-masking noise (ERB noise) within a given frequency band.
Arguments:
duration (float | int): duration of the sound in seconds (given a float) or in samples (given an int).
low_cutoff (int | float): the lower frequency limit of the noise in Hz
high_cutoff (int | float): the upper frequency limit of the noise in Hz
samplerate (int | None): the samplerate of the sound. If None, use the default samplerate.
level (None | int | float | list): the sounds level in decibel. For a multichannel sound, a list of values
can be provided to set the level of each channel individually. If None, the level is set to the default
Returns:
(slab.Sound): equally masking noise noise, generated from the given parameters.
Examples::
sig = Sound.erb_noise()
sig.spectrum()
"""
if samplerate is None:
samplerate = slab.signal._default_samplerate
duration = Sound.in_samples(duration, samplerate)
n = 2 ** (duration - 1).bit_length() # next power of 2
st = 1 / samplerate
df = 1 / (st * n)
frq = df * numpy.arange(n / 2)
frq[0] = 1 # avoid DC = 0
lev = -10 * numpy.log10(24.7 * (4.37 * frq))
filt = 10. ** (lev / 20)
noise = numpy.random.randn(n)
noise = numpy.real(numpy.fft.ifft(numpy.concatenate(
(filt, filt[::-1])) * numpy.fft.fft(noise)))
noise = noise / numpy.sqrt(numpy.mean(noise ** 2))
band = numpy.zeros(len(lev))
band[round(low_cutoff / df):round(high_cutoff / df)] = 1
fnoise = numpy.real(numpy.fft.ifft(numpy.concatenate(
(band, band[::-1])) * numpy.fft.fft(noise)))
fnoise = fnoise[:duration]
out = Sound(data=fnoise, samplerate=samplerate)
out.level = level
return out
@staticmethod
def sequence(*sounds):
"""
Join sounds into a new sound object.
Arguments:
*sounds (slab.Sound): two or more sounds to combine.
Returns:
(slab.Sound): the input sounds combined in a single object.
"""
samplerate = sounds[0].samplerate
for sound in sounds:
if sound.samplerate != samplerate:
raise ValueError('All sounds must have the same sample rate.')
sounds = tuple(s.data for s in sounds)
x = numpy.vstack(sounds)
return Sound(x, samplerate)
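# Illustrative usage sketch (not part of slab): joining two sounds end to end.
# The generator parameters below are hypothetical.
#     noise = Sound.whitenoise(duration=0.5)
#     tone = Sound.tone(frequency=500, duration=0.5)
#     combined = Sound.sequence(noise, tone)   # total duration 1.0 s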
# instance methods
def write(self, filename, normalise=True, fmt='WAV'):
"""
Save the sound as a WAV.
Arguments:
filename (str | pathlib.Path): path, the file is written to.
normalise (bool): if True, the maximal amplitude of the sound is normalised to 1.
fmt (str): data format to write. See soundfile.available_formats().
"""
if soundfile is False:
raise ImportError(
'Writing wav files requires SoundFile (pip install SoundFile).')
if isinstance(filename, pathlib.Path):
filename = str(filename)
if normalise:
soundfile.write(filename, self.data / numpy.amax(numpy.abs(self.data)), self.samplerate, format=fmt)
else:
if self.data.max(initial=0) > 1.0:
print("There are data points in the signal that will be clipped. Normalization is recommended!")
soundfile.write(filename, self.data, self.samplerate, format=fmt)
def ramp(self, when='both', duration=0.01, envelope=None):
"""
Adds an on and/or off ramp to the sound.
Arguments:
when (str): can take values 'onset', 'offset' or 'both'
duration (float | int): duration of the sound in seconds (given a float) or in samples (given an int).
envelope(callable): function to compute the samples of the ramp, defaults to a sinusoid
Returns:
(slab.Sound): copy of the sound with the added ramp(s)
"""
sound = copy.deepcopy(self)
when = when.lower().strip()
if envelope is None:
envelope = lambda t: numpy.sin(numpy.pi * t / 2) ** 2 # squared sine window
sz = Sound.in_samples(duration, sound.samplerate)
multiplier = envelope(numpy.reshape(numpy.linspace(0.0, 1.0, sz), (sz, 1)))
if when in ('onset', 'both'):
sound.data[:sz, :] *= multiplier
if when in ('offset', 'both'):
sound.data[sound.n_samples - sz:, :] *= multiplier[::-1]
return sound
def repeat(self, n):
"""
Repeat the sound n times.
Arguments:
n (int): the number of repetitions.
Returns:
(slab.Sound): copy of the sound repeated n times.
"""
sound = copy.deepcopy(self)
sound.data = numpy.vstack((sound.data,) * int(n))
return sound
@staticmethod
def crossfade(*sounds, overlap=0.01):
"""
Crossfade several sounds.
Arguments:
*sounds (instances of slab.Sound): sounds to crossfade
overlap (float | int): duration of the overlap between the cross-faded sounds in seconds (given a float)
or in samples (given an int).
Returns:
(slab.Sound): A single sound that contains all input sounds cross-faded. The duration will be the
sum of the input sound's durations minus the overlaps.
Examples::
noise = Sound.whitenoise(duration=1.0)
vowel = Sound.vowel()
noise2vowel = Sound.crossfade(vowel, noise, vowel, overlap=0.4)
noise2vowel.play()
"""
sounds = list(sounds)
if any([sound.duration < overlap * 2 for sound in sounds]):
raise ValueError('The overlap can not be longer then the half of the sound.')
if len({sound.n_channels for sound in sounds}) != 1:
raise ValueError('Cannot crossfade sounds with unequal numbers of channels.')
if len({sound.samplerate for sound in sounds}) != 1:
raise ValueError('Cannot crossfade sounds with unequal samplerates.')
overlap = Sound.in_samples(overlap, samplerate=sounds[0].samplerate)
n_total = sum([sound.n_samples for sound in sounds]) - overlap * (len(sounds) - 1)
# give each sound an offset and onset ramp and add silence to them. The length of the silence added to the
# beginning and end of the sound is equal to the length of the sounds that come before or after minus overlaps
n_previous = 0
for i, sound in enumerate(sounds):
n_samples = sound.n_samples
if i == 0:
sound = sound.ramp(duration=overlap, when="offset") # for the first sound only add offset ramp
sounds[i] = sound.resize(n_total)
else:
n_silence_before = n_previous - overlap * i
n_silence_after = n_total - n_silence_before - sound.n_samples
if i == len(sounds) - 1:
sound = sound.ramp(duration=overlap, when="onset") # for the last sound only add onset ramp
sounds[i] = Sound.sequence(
Sound.silence(n_silence_before, samplerate=sound.samplerate, n_channels=sound.n_channels),
sound)
else:
sound = sound.ramp(duration=overlap, when="both") # for all other sounds add both
sounds[i] = Sound.sequence(
Sound.silence(n_silence_before, samplerate=sound.samplerate, n_channels=sound.n_channels),
sound,
Sound.silence(n_silence_after, samplerate=sound.samplerate, n_channels=sound.n_channels))
n_previous += n_samples
sound = sum(sounds)
return sound
def pulse(self, frequency=4, duty=0.75, gate_time=0.005):
"""
Apply a pulsed envelope to the sound.
Arguments:
frequency (float): the frequency of pulses in Hz.
duty (float): duty cycle, i.e. ratio between the pulse duration and pulse period,
values must be between 1 (always high) and 0 (always low). When using values close to 0, `gate_time`
may need to be decreased to avoid on and off ramps being longer than the pulse.
gate_time (float): rise/fall time of each pulse in seconds
Returns:
slab.Sound: pulsed copy of the instance.
"""
sound = copy.deepcopy(self)
pulse_period = 1 / frequency
n_pulses = round(sound.duration / pulse_period) # number of pulses in the stimulus
pulse_period = sound.duration / n_pulses # period in s, fits into stimulus duration
pulse_samples = Sound.in_samples(pulse_period * duty, sound.samplerate)
fall_samples = Sound.in_samples(gate_time, sound.samplerate) # 5ms rise/fall time
if (pulse_samples - 2 * fall_samples) < 0:
raise ValueError(f'The pulse duration {pulse_samples} is shorter than the combined ramps'
f'({fall_samples} each). Reduce ´pulse_frequency´ or `gate_time`!')
fall = numpy.cos(numpy.pi * numpy.arange(fall_samples) / (2 * fall_samples)) ** 2
pulse = numpy.concatenate((1 - fall, numpy.ones(pulse_samples - 2 * fall_samples), fall))
pulse = numpy.concatenate(
(pulse, numpy.zeros(Sound.in_samples(pulse_period, sound.samplerate) - len(pulse))))
envelope = numpy.tile(pulse, n_pulses)
envelope = envelope[:, None] # add an empty axis to get to the same shape as sound.data
# if data is 2D (>1 channel) broadcast the envelope to fit
sound.data *= numpy.broadcast_to(envelope, sound.data.shape)
return sound
def am(self, frequency=10, depth=1, phase=0):
"""
Apply an amplitude modulation to the sound by multiplication with a sine function.
Arguments:
frequency (int): frequency of the modulating sine function in Hz
depth (int, float): modulation depth/index of the modulating sine function
phase (int, float): initial phase of the modulating sine function
Returns:
slab.Sound: amplitude modulated copy of the instance.
"""
sound = copy.deepcopy(self)
envelope = (1 + depth * numpy.sin(2 * numpy.pi * frequency * sound.times + phase))
envelope = envelope[:, None]
sound.data *= numpy.broadcast_to(envelope, sound.data.shape)
return sound
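# Illustrative note (not part of slab): the modulation envelope applied above
# is 1 + depth * sin(2*pi*frequency*t + phase); with depth=1 it swings between
# 0 and 2, with depth=0.5 between 0.5 and 1.5. A hypothetical usage:
#     sig = Sound.whitenoise(duration=1.0)
#     sig_am = sig.am(frequency=8, depth=0.5)   # 8 Hz amplitude modulation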
def filter(self, frequency=100, kind='hp'):
"""
Convenient | |
# -*- coding: utf-8 -*-
# Copyright 2009-2018, <NAME>, <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculations for the position of the sun and moon.
The :mod:`astral` module provides the means to calculate dawn, sunrise,
solar noon, sunset, dusk and rahukaalam times, plus solar azimuth and
elevation, for specific locations or at a specific latitude/longitude. It can
also calculate the moon phase for a specific date.
The module provides 2 main classes :class:`Astral` and :class:`Location`.
:class:`Astral`
Has 2 main responsibilities
* Calculates the events in the UTC timezone.
* Provides access to location data
:class:`Location`
Holds information about a location and provides functions to calculate
the event times for the location in the correct time zone.
For example ::
>>> from astral import *
>>> a = Astral()
>>> location = a['London']
>>> print('Information for %s' % location.name)
Information for London
>>> timezone = location.timezone
>>> print('Timezone: %s' % timezone)
Timezone: Europe/London
>>> print('Latitude: %.02f; Longitude: %.02f' % (location.latitude,
... location.longitude))
Latitude: 51.60; Longitude: 0.05
>>> from datetime import date
>>> d = date(2009,4,22)
>>> sun = location.sun(local=True, date=d)
>>> print('Dawn: %s' % str(sun['dawn']))
Dawn: 2009-04-22 05:12:56+01:00
The module currently provides 2 methods of obtaining location information;
:class:`AstralGeocoder` (the default, which uses information from within the
module) and :class:`GoogleGeocoder` (which obtains information from Google's
Map Service.)
To use the :class:`GoogleGeocoder` pass the class as the `geocoder` parameter
to :meth:`Astral.__init__` or by setting the `geocoder` property to an
instance of :class:`GoogleGeocoder`::
>>> from astral import GoogleGeocoder
>>> a = Astral(GoogleGeocoder)
or ::
>>> from astral import GoogleGeocoder
>>> a = Astral()
>>> a.geocoder = GoogleGeocoder()
"""
from __future__ import unicode_literals, division
try:
import pytz
except ImportError:
raise ImportError(
("The astral module requires the " "pytz module to be available.")
)
try:
import requests
except ImportError:
raise ImportError(
("The astral module requires the " "requests module to be available.")
)
import datetime
from time import time
from math import cos, sin, tan, acos, asin, atan2, floor, ceil
from math import radians, degrees, pow
from numbers import Number
import sys
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
try:
from urllib2 import URLError
except ImportError:
from urllib.request import URLError
try:
import simplejson as json
except ImportError:
import json
if sys.version_info[0] >= 3:
ustr = str
else:
ustr = unicode # pylint: disable=E0602
__all__ = ["Astral", "Location", "AstralGeocoder", "GoogleGeocoder", "AstralError"]
__version__ = "1.6.1"
__author__ = "<NAME> <<EMAIL>>"
SUN_RISING = 1
SUN_SETTING = -1
# name,region,latitude,longitude,timezone,elevation
_LOCATION_INFO = """Abu Dhabi,UAE,24°28'N,54°22'E,Asia/Dubai,5
Abu Dhabi,United Arab Emirates,24°28'N,54°22'E,Asia/Dubai,5
Abuja,Nigeria,09°05'N,07°32'E,Africa/Lagos,342
Accra,Ghana,05°35'N,00°06'W,Africa/Accra,61
Addis Ababa,Ethiopia,09°02'N,38°42'E,Africa/Addis_Ababa,2355
Adelaide,Australia,34°56'S,138°36'E,Australia/Adelaide,50
Al Jubail,Saudi Arabia,25°24'N,49°39'W,Asia/Riyadh,8
Algiers,Algeria,36°42'N,03°08'E,Africa/Algiers,224
Amman,Jordan,31°57'N,35°52'E,Asia/Amman,1100
Amsterdam,Netherlands,52°23'N,04°54'E,Europe/Amsterdam,2
Andorra la Vella,Andorra,42°31'N,01°32'E,Europe/Andorra,1023
Ankara,Turkey,39°57'N,32°54'E,Europe/Istanbul,938
Antananarivo,Madagascar,18°55'S,47°31'E,Indian/Antananarivo,1276
Apia,Samoa,13°50'S,171°50'W,Pacific/Apia,2
Ashgabat,Turkmenistan,38°00'N,57°50'E,Asia/Ashgabat,219
Asmara,Eritrea,15°19'N,38°55'E,Africa/Asmara,2325
Astana,Kazakhstan,51°10'N,71°30'E,Asia/Qyzylorda,347
Asuncion,Paraguay,25°10'S,57°30'W,America/Asuncion,124
Athens,Greece,37°58'N,23°46'E,Europe/Athens,338
Avarua,Cook Islands,21°12'N,159°46'W,Etc/GMT-10,208
Baghdad,Iraq,33°20'N,44°30'E,Asia/Baghdad,41
Baku,Azerbaijan,40°29'N,49°56'E,Asia/Baku,30
Bamako,Mali,12°34'N,07°55'W,Africa/Bamako,350
Bandar Seri Begawan,Brunei Darussalam,04°52'N,115°00'E,Asia/Brunei,1
Bangkok,Thailand,13°45'N,100°35'E,Asia/Bangkok,2
Bangui,Central African Republic,04°23'N,18°35'E,Africa/Bangui,373
Banjul,Gambia,13°28'N,16°40'W,Africa/Banjul,5
Basse-Terre,Guadeloupe,16°00'N,61°44'W,America/Guadeloupe,1
Basseterre,Saint Kitts and Nevis,17°17'N,62°43'W,America/St_Kitts,50
Beijing,China,39°55'N,116°20'E,Asia/Harbin,59
Beirut,Lebanon,33°53'N,35°31'E,Asia/Beirut,56
Belfast,Northern Ireland,54°36'N,5°56'W,Europe/Belfast,9
Belgrade,Yugoslavia,44°50'N,20°37'E,Europe/Belgrade,90
Belmopan,Belize,17°18'N,88°30'W,America/Belize,63
Berlin,Germany,52°30'N,13°25'E,Europe/Berlin,35
Bern,Switzerland,46°57'N,07°28'E,Europe/Zurich,510
Bishkek,Kyrgyzstan,42°54'N,74°46'E,Asia/Bishkek,772
Bissau,Guinea-Bissau,11°45'N,15°45'W,Africa/Bissau,0
Bloemfontein,South Africa,29°12'S,26°07'E,Africa/Johannesburg,1398
Bogota,Colombia,04°34'N,74°00'W,America/Bogota,2620
Brasilia,Brazil,15°47'S,47°55'W,Brazil/East,1087
Bratislava,Slovakia,48°10'N,17°07'E,Europe/Bratislava,132
Brazzaville,Congo,04°09'S,15°12'E,Africa/Brazzaville,156
Bridgetown,Barbados,13°05'N,59°30'W,America/Barbados,1
Brisbane,Australia,27°30'S,153°01'E,Australia/Brisbane,25
Brussels,Belgium,50°51'N,04°21'E,Europe/Brussels,62
Bucharest,Romania,44°27'N,26°10'E,Europe/Bucharest,71
Bucuresti,Romania,44°27'N,26°10'E,Europe/Bucharest,71
Budapest,Hungary,47°29'N,19°05'E,Europe/Budapest,120
Buenos Aires,Argentina,34°62'S,58°44'W,America/Buenos_Aires,6
Bujumbura,Burundi,03°16'S,29°18'E,Africa/Bujumbura,782
Cairo,Egypt,30°01'N,31°14'E,Africa/Cairo,74
Canberra,Australia,35°15'S,149°08'E,Australia/Canberra,575
Cape Town,South Africa,33°55'S,18°22'E,Africa/Johannesburg,1700
Caracas,Venezuela,10°30'N,66°55'W,America/Caracas,885
Castries,Saint Lucia,14°02'N,60°58'W,America/St_Lucia,125
Cayenne,French Guiana,05°05'N,52°18'W,America/Cayenne,9
Charlotte Amalie,United States of Virgin Islands,18°21'N,64°56'W,America/Virgin,0
Chisinau,Moldova,47°02'N,28°50'E,Europe/Chisinau,122
Conakry,Guinea,09°29'N,13°49'W,Africa/Conakry,26
Copenhagen,Denmark,55°41'N,12°34'E,Europe/Copenhagen,5
Cotonou,Benin,06°23'N,02°42'E,Africa/Porto-Novo,5
Dakar,Senegal,14°34'N,17°29'W,Africa/Dakar,24
Damascus,Syrian Arab Republic,33°30'N,36°18'E,Asia/Damascus,609
Dammam,Saudi Arabia,26°30'N,50°12'E,Asia/Riyadh,1
Dhaka,Bangladesh,23°43'N,90°26'E,Asia/Dhaka,8
Dili,East Timor,08°29'S,125°34'E,Asia/Dili,11
Djibouti,Djibouti,11°08'N,42°20'E,Africa/Djibouti,19
Dodoma,United Republic of Tanzania,06°08'S,35°45'E,Africa/Dar_es_Salaam,1119
Doha,Qatar,25°15'N,51°35'E,Asia/Qatar,10
Douglas,Isle Of Man,54°9'N,4°29'W,Europe/London,35
Dublin,Ireland,53°21'N,06°15'W,Europe/Dublin,85
Dushanbe,Tajikistan,38°33'N,68°48'E,Asia/Dushanbe,803
El Aaiun,Morocco,27°9'N,13°12'W,UTC,64
Fort-de-France,Martinique,14°36'N,61°02'W,America/Martinique,9
Freetown,Sierra Leone,08°30'N,13°17'W,Africa/Freetown,26
Funafuti,Tuvalu,08°31'S,179°13'E,Pacific/Funafuti,2
Gaborone,Botswana,24°45'S,25°57'E,Africa/Gaborone,1005
George Town,Cayman Islands,19°20'N,81°24'W,America/Cayman,3
Georgetown,Guyana,06°50'N,58°12'W,America/Guyana,30
Gibraltar,Gibraltar,36°9'N,5°21'W,Europe/Gibraltar,3
Guatemala,Guatemala,14°40'N,90°22'W,America/Guatemala,1500
Hanoi,Viet Nam,21°05'N,105°55'E,Asia/Saigon,6
Harare,Zimbabwe,17°43'S,31°02'E,Africa/Harare,1503
Havana,Cuba,23°08'N,82°22'W,America/Havana,59
Helsinki,Finland,60°15'N,25°03'E,Europe/Helsinki,56
Hobart,Tasmania,42°53'S,147°19'E,Australia/Hobart,4
Hong Kong,China,22°16'N,114°09'E,Asia/Hong_Kong,8
Honiara,Solomon Islands,09°27'S,159°57'E,Pacific/Guadalcanal,8
Islamabad,Pakistan,33°40'N,73°10'E,Asia/Karachi,508
Jakarta,Indonesia,06°09'S,106°49'E,Asia/Jakarta,6
Jerusalem,Israel,31°47'N,35°12'E,Asia/Jerusalem,775
Juba,South Sudan,4°51'N,31°36'E,Africa/Juba,550
Jubail,Saudi Arabia,27°02'N,49°39'E,Asia/Riyadh,2
Kabul,Afghanistan,34°28'N,69°11'E,Asia/Kabul,1791
Kampala,Uganda,00°20'N,32°30'E,Africa/Kampala,1155
Kathmandu,Nepal,27°45'N,85°20'E,Asia/Kathmandu,1337
Khartoum,Sudan,15°31'N,32°35'E,Africa/Khartoum,380
Kiev,Ukraine,50°30'N,30°28'E,Europe/Kiev,153
Kigali,Rwanda,01°59'S,30°04'E,Africa/Kigali,1497
Kingston,Jamaica,18°00'N,76°50'W,America/Jamaica,9
Kingston,Norfolk Island,45°20'S,168°43'E,Pacific/Norfolk,113
Kingstown,Saint Vincent and the Grenadines,13°10'N,61°10'W,America/St_Vincent,1
Kinshasa,Democratic Republic of the Congo,04°20'S,15°15'E,Africa/Kinshasa,312
Koror,Palau,07°20'N,134°28'E,Pacific/Palau,33
Kuala Lumpur,Malaysia,03°09'N,101°41'E,Asia/Kuala_Lumpur,22
Kuwait,Kuwait,29°30'N,48°00'E,Asia/Kuwait,55
La Paz,Bolivia,16°20'S,68°10'W,America/La_Paz,4014
Libreville,Gabon,00°25'N,09°26'E,Africa/Libreville,15
Lilongwe,Malawi,14°00'S,33°48'E,Africa/Blantyre,1229
Lima,Peru,12°00'S,77°00'W,America/Lima,13
Lisbon,Portugal,38°42'N,09°10'W,Europe/Lisbon,123
Ljubljana,Slovenia,46°04'N,14°33'E,Europe/Ljubljana,385
Lome,Togo,06°09'N,01°20'E,Africa/Lome,25
London,England,51°30'N,00°07'W,Europe/London,24
Luanda,Angola,08°50'S,13°15'E,Africa/Luanda,6
Lusaka,Zambia,15°28'S,28°16'E,Africa/Lusaka,1154
Luxembourg,Luxembourg,49°37'N,06°09'E,Europe/Luxembourg,232
Macau,Macao,22°12'N,113°33'E,Asia/Macau,6
Madinah,Saudi Arabia,24°28'N,39°36'E,Asia/Riyadh,631
Madrid,Spain,40°25'N,03°45'W,Europe/Madrid,582
Majuro,Marshall Islands,7°4'N,171°16'E,Pacific/Majuro,65
Makkah,Saudi Arabia,21°26'N,39°49'E,Asia/Riyadh,240
Malabo,Equatorial Guinea,03°45'N,08°50'E,Africa/Malabo,56
Male,Maldives,04°00'N,73°28'E,Indian/Maldives,2
Mamoudzou,Mayotte,12°48'S,45°14'E,Indian/Mayotte,420
Managua,Nicaragua,12°06'N,86°20'W,America/Managua,50
Manama,Bahrain,26°10'N,50°30'E,Asia/Bahrain,2
Manila,Philippines,14°40'N,121°03'E,Asia/Manila,21
Maputo,Mozambique,25°58'S,32°32'E,Africa/Maputo,44
Maseru,Lesotho,29°18'S,27°30'E,Africa/Maseru,1628
Masqat,Oman,23°37'N,58°36'E,Asia/Muscat,8
Mbabane,Swaziland,26°18'S,31°06'E,Africa/Mbabane,1243
Mecca,Saudi Arabia,21°26'N,39°49'E,Asia/Riyadh,240
Medina,Saudi Arabia,24°28'N,39°36'E,Asia/Riyadh,631
Mexico,Mexico,19°20'N,99°10'W,America/Mexico_City,2254
Minsk,Belarus,53°52'N,27°30'E,Europe/Minsk,231
Mogadishu,Somalia,02°02'N,45°25'E,Africa/Mogadishu,9
Monaco,Principality Of Monaco,43°43'N,7°25'E,Europe/Monaco,206
Monrovia,Liberia,06°18'N,10°47'W,Africa/Monrovia,9
Montevideo,Uruguay,34°50'S,56°11'W,America/Montevideo,32
Moroni,Comoros,11°40'S,43°16'E,Indian/Comoro,29
Moscow,Russian Federation,55°45'N,37°35'E,Europe/Moscow,247
Moskva,Russian Federation,55°45'N,37°35'E,Europe/Moscow,247
Mumbai,India,18°58'N,72°49'E,Asia/Kolkata,14
Muscat,Oman,23°37'N,58°32'E,Asia/Muscat,8
N'Djamena,Chad,12°10'N,14°59'E,Africa/Ndjamena,295
Nairobi,Kenya,01°17'S,36°48'E,Africa/Nairobi,1624
Nassau,Bahamas,25°05'N,77°20'W,America/Nassau,7
Naypyidaw,Myanmar,19°45'N,96°6'E,Asia/Rangoon,104
New Delhi,India,28°37'N,77°13'E,Asia/Kolkata,233
Ngerulmud,Palau,7°30'N,134°37'E,Pacific/Palau,3
Niamey,Niger,13°27'N,02°06'E,Africa/Niamey,223
Nicosia,Cyprus,35°10'N,33°25'E,Asia/Nicosia,162
Nouakchott,Mauritania,20°10'S,57°30'E,Africa/Nouakchott,3
Noumea,New Caledonia,22°17'S,166°30'E,Pacific/Noumea,69
Nuku'alofa,Tonga,21°10'S,174°00'W,Pacific/Tongatapu,6
Nuuk,Greenland,64°10'N,51°35'W,America/Godthab,70
Oranjestad,Aruba,12°32'N,70°02'W,America/Aruba,33
Oslo,Norway,59°55'N,10°45'E,Europe/Oslo,170
Ottawa,Canada,45°27'N,75°42'W,US/Eastern,79
Ouagadougou,Burkina Faso,12°15'N,01°30'W,Africa/Ouagadougou,316
P'yongyang,Democratic People's Republic of Korea,39°09'N,125°30'E,Asia/Pyongyang,21
Pago Pago,American Samoa,14°16'S,170°43'W,Pacific/Pago_Pago,0
Palikir,Micronesia,06°55'N,158°09'E,Pacific/Ponape,71
Panama,Panama,09°00'N,79°25'W,America/Panama,2
Papeete,French Polynesia,17°32'S,149°34'W,Pacific/Tahiti,7
Paramaribo,Suriname,05°50'N,55°10'W,America/Paramaribo,7
Paris,France,48°50'N,02°20'E,Europe/Paris,109
Perth,Australia,31°56'S,115°50'E,Australia/Perth,20
Phnom Penh,Cambodia,11°33'N,104°55'E,Asia/Phnom_Penh,10
Podgorica,Montenegro,42°28'N,19°16'E,Europe/Podgorica,53
Port Louis,Mauritius,20°9'S,57°30'E,Indian/Mauritius,5
Port Moresby,Papua New Guinea,09°24'S,147°08'E,Pacific/Port_Moresby,44
Port-Vila,Vanuatu,17°45'S,168°18'E,Pacific/Efate,1
Port-au-Prince,Haiti,18°40'N,72°20'W,America/Port-au-Prince,34
Port of Spain,Trinidad and Tobago,10°40'N,61°31'W,America/Port_of_Spain,66
Porto-Novo,Benin,06°23'N,02°42'E,Africa/Porto-Novo,38
Prague,Czech Republic,50°05'N,14°22'E,Europe/Prague,365
Praia,Cape Verde,15°02'N,23°34'W,Atlantic/Cape_Verde,35
Pretoria,South Africa,25°44'S,28°12'E,Africa/Johannesburg,1322
Pristina,Albania,42°40'N,21°10'E,Europe/Tirane,576
Quito,Ecuador,00°15'S,78°35'W,America/Guayaquil,2812
Rabat,Morocco,34°1'N,6°50'W,Africa/Casablanca,75
Reykjavik,Iceland,64°10'N,21°57'W,Atlantic/Reykjavik,61
Riga,Latvia,56°53'N,24°08'E,Europe/Riga,7
Riyadh,Saudi Arabia,24°41'N,46°42'E,Asia/Riyadh,612
Road Town,British Virgin Islands,18°27'N,64°37'W,America/Virgin,1
Rome,Italy,41°54'N,12°29'E,Europe/Rome,95
Roseau,Dominica,15°20'N,61°24'W,America/Dominica,72
Saint Helier,Jersey,49°11'N,2°6'W,Etc/GMT,54
Saint Pierre,Saint Pierre and Miquelon,46°46'N,56°12'W,America/Miquelon,5
Saipan,Northern Mariana Islands,15°12'N,145°45'E,Pacific/Saipan,200
Sana,Yemen,15°20'N,44°12'W,Asia/Aden,2199
Sana'a,Yemen,15°20'N,44°12'W,Asia/Aden,2199
San Jose,Costa Rica,09°55'N,84°02'W,America/Costa_Rica,931
San Juan,Puerto Rico,18°28'N,66°07'W,America/Puerto_Rico,21
San Marino,San Marino,43°55'N,12°30'E,Europe/San_Marino,749
San Salvador,El Salvador,13°40'N,89°10'W,America/El_Salvador,621
Santiago,Chile,33°24'S,70°40'W,America/Santiago,476
Santo Domingo,Dominican Republic,18°30'N,69°59'W,America/Santo_Domingo,14
Sao Tome,Sao Tome and Principe,00°10'N,06°39'E,Africa/Sao_Tome,13
Sarajevo,Bosnia and Herzegovina,43°52'N,18°26'E,Europe/Sarajevo,511
Seoul,Republic of Korea,37°31'N,126°58'E,Asia/Seoul,49
Singapore,Republic of Singapore,1°18'N,103°48'E,Asia/Singapore,16
Skopje,The Former Yugoslav Republic of Macedonia,42°01'N,21°26'E,Europe/Skopje,238
Sofia,Bulgaria,42°45'N,23°20'E,Europe/Sofia,531
Sri Jayawardenapura Kotte,Sri Lanka,6°54'N,79°53'E,Asia/Colombo,7
St. George's,Grenada,32°22'N,64°40'W,America/Grenada,7
St. John's,Antigua and Barbuda,17°7'N,61°51'W,America/Antigua,1
St. <NAME>,Guernsey,49°26'N,02°33'W,Europe/Guernsey,1
Stanley,Falkland Islands,51°40'S,59°51'W,Atlantic/Stanley,23
Stockholm,Sweden,59°20'N,18°05'E,Europe/Stockholm,52
Sucre,Bolivia,16°20'S,68°10'W,America/La_Paz,2903
Suva,Fiji,18°06'S,178°30'E,Pacific/Fiji,0
Sydney,Australia,33°53'S,151°13'E,Australia/Sydney,3
Taipei,Republic of China (Taiwan),25°02'N,121°38'E,Asia/Taipei,9
T'bilisi,Georgia,41°43'N,44°50'E,Asia/Tbilisi,467
Tbilisi,Georgia,41°43'N,44°50'E,Asia/Tbilisi,467
Tallinn,Estonia,59°22'N,24°48'E,Europe/Tallinn,39
Tarawa,Kiribati,01°30'N,173°00'E,Pacific/Tarawa,2
Tashkent,Uzbekistan,41°20'N,69°10'E,Asia/Tashkent,489
Tegucigalpa,Honduras,14°05'N,87°14'W,America/Tegucigalpa,994
Tehran,Iran,35°44'N,51°30'E,Asia/Tehran,1191
Thimphu,Bhutan,27°31'N,89°45'E,Asia/Thimphu,2300
Tirana,Albania,41°18'N,19°49'E,Europe/Tirane,90
Tirane,Albania,41°18'N,19°49'E,Europe/Tirane,90
Torshavn,Faroe Islands,62°05'N,06°56'W,Atlantic/Faroe,39
Tokyo,Japan,35°41'N,139°41'E,Asia/Tokyo,8
Tripoli,Libyan Arab Jamahiriya,32°49'N,13°07'E,Africa/Tripoli,81
Tunis,Tunisia,36°50'N,10°11'E,Africa/Tunis,4
Ulan Bator,Mongolia,47°55'N,106°55'E,Asia/Ulaanbaatar,1330
Ulaanbaatar,Mongolia,47°55'N,106°55'E,Asia/Ulaanbaatar,1330
Vaduz,Liechtenstein,47°08'N,09°31'E,Europe/Vaduz,463
Valletta,Malta,35°54'N,14°31'E,Europe/Malta,48
Vienna,Austria,48°12'N,16°22'E,Europe/Vienna,171
Vientiane,Lao People's Democratic Republic,17°58'N,102°36'E,Asia/Vientiane,171
Vilnius,Lithuania,54°38'N,25°19'E,Europe/Vilnius,156
W. Indies,Antigua and Barbuda,17°20'N,61°48'W,America/Antigua,0
Warsaw,Poland,52°13'N,21°00'E,Europe/Warsaw,107
Washington DC,USA,38°54'N,77°02'W,US/Eastern,23
Wellington,New Zealand,41°19'S,174°46'E,Pacific/Auckland,7
Willemstad,Netherlands Antilles,12°05'N,69°00'W,America/Curacao,1
Windhoek,Namibia,22°35'S,17°04'E,Africa/Windhoek,1725
Yamoussoukro,Cote d'Ivoire,06°49'N,05°17'W,Africa/Abidjan,213
Yangon,Myanmar,16°45'N,96°20'E,Asia/Rangoon,33
Yaounde,Cameroon,03°50'N,11°35'E,Africa/Douala,760
Yaren,Nauru,0°32'S,166°55'E,Pacific/Nauru,0
Yerevan,Armenia,40°10'N,44°31'E,Asia/Yerevan,890
Zagreb,Croatia,45°50'N,15°58'E,Europe/Zagreb,123
# UK Cities
Aberdeen,Scotland,57°08'N,02°06'W,Europe/London,65
Birmingham,England,52°30'N,01°50'W,Europe/London,99
Bolton,England,53°35'N,02°15'W,Europe/London,105
Bradford,England,53°47'N,01°45'W,Europe/London,127
Bristol,England,51°28'N,02°35'W,Europe/London,11
Cardiff,Wales,51°29'N,03°13'W,Europe/London,9
Crawley,England,51°8'N,00°10'W,Europe/London,77
Edinburgh,Scotland,55°57'N,03°13'W,Europe/London,61
Glasgow,Scotland,55°50'N,04°15'W,Europe/London,8
Greenwich,England,51°28'N,00°00'W,Europe/London,24
Leeds,England,53°48'N,01°35'W,Europe/London,47
Leicester,England,52°38'N,01°08'W,Europe/London,138
Liverpool,England,53°25'N,03°00'W,Europe/London,25
Manchester,England,53°30'N,02°15'W,Europe/London,78
Newcastle Upon Tyne,England,54°59'N,01°36'W,Europe/London,47
Newcastle,England,54°59'N,01°36'W,Europe/London,47
Norwich,England,52°38'N,01°18'E,Europe/London,18
Oxford,England,51°45'N,01°15'W,Europe/London,72
Plymouth,England,50°25'N,04°15'W,Europe/London,50
Portsmouth,England,50°48'N,01°05'W,Europe/London,9
Reading,England,51°27'N,0°58'W,Europe/London,84
Sheffield,England,53°23'N,01°28'W,Europe/London,105
Southampton,England,50°55'N,01°25'W,Europe/London,9
Swansea,Wales,51°37'N,03°57'W,Europe/London,91
Swindon,England,51°34'N,01°47'W,Europe/London,112
Wolverhampton,England,52°35'N,2°08'W,Europe/London,89
Barrow-In-Furness,England,54°06'N,3°13'W,Europe/London,20
# US State Capitals
Montgomery,USA,32°21'N,86°16'W,US/Central,42
Juneau,USA,58°23'N,134°11'W,US/Alaska,29
Phoenix,USA,33°26'N,112°04'W,America/Phoenix,331
Little Rock,USA,34°44'N,92°19'W,US/Central,95
Sacramento,USA,38°33'N,121°28'W,US/Pacific,15
Denver,USA,39°44'N,104°59'W,US/Mountain,1600
Hartford,USA,41°45'N,72°41'W,US/Eastern,9
Dover,USA,39°09'N,75°31'W,US/Eastern,8
Tallahassee,USA,30°27'N,84°16'W,US/Eastern,59
Atlanta,USA,33°45'N,84°23'W,US/Eastern,267
Honolulu,USA,21°18'N,157°49'W,US/Hawaii,229
Boise,USA,43°36'N,116°12'W,US/Mountain,808
Springfield,USA,39°47'N,89°39'W,US/Central,190
Indianapolis,USA,39°46'N,86°9'W,US/Eastern,238
Des Moines,USA,41°35'N,93°37'W,US/Central,276
Topeka,USA,39°03'N,95°41'W,US/Central,289
Frankfort,USA,38°11'N,84°51'W,US/Eastern,243
Baton Rouge,USA,30°27'N,91°8'W,US/Central,15
Augusta,USA,44°18'N,69°46'W,US/Eastern,41
Annapolis,USA,38°58'N,76°30'W,US/Eastern,0
Boston,USA,42°21'N,71°03'W,US/Eastern,6
Lansing,USA,42°44'N,84°32'W,US/Eastern,271
Saint Paul,USA,44°56'N,93°05'W,US/Central,256
Jackson,USA,32°17'N,90°11'W,US/Central,90
Jefferson City,USA,38°34'N,92°10'W,US/Central,167
Helena,USA,46°35'N,112°1'W,US/Mountain,1150
Lincoln,USA,40°48'N,96°40'W,US/Central,384
Carson City,USA,39°9'N,119°45'W,US/Pacific,1432
Concord,USA,43°12'N,71°32'W,US/Eastern,117
Trenton,USA,40°13'N,74°45'W,US/Eastern,28
Santa Fe,USA,35°40'N,105°57'W,US/Mountain,2151
Albany,USA,42°39'N,73°46'W,US/Eastern,17
Raleigh,USA,35°49'N,78°38'W,US/Eastern,90
Bismarck,USA,46°48'N,100°46'W,US/Central,541
Columbus,USA,39°59'N,82°59'W,US/Eastern,271
Oklahoma City,USA,35°28'N,97°32'W,US/Central,384
Salem,USA,44°55'N,123°1'W,US/Pacific,70
Harrisburg,USA,40°16'N,76°52'W,US/Eastern,112
Providence,USA,41°49'N,71°25'W,US/Eastern,2
Columbia,USA,34°00'N,81°02'W,US/Eastern,96
Pierre,USA,44°22'N,100°20'W,US/Central,543
Nashville,USA,36°10'N,86°47'W,US/Central,149
Austin,USA,30°16'N,97°45'W,US/Central,167
Salt Lake City,USA,40°45'N,111°53'W,US/Mountain,1294
Montpelier,USA,44°15'N,72°34'W,US/Eastern,325
Richmond,USA,37°32'N,77°25'W,US/Eastern,68
Olympia,USA,47°2'N,122°53'W,US/Pacific,35
Charleston,USA,38°20'N,81°38'W,US/Eastern,11
Madison,USA,43°4'N,89°24'W,US/Central,281
Cheyenne,USA,41°8'N,104°48'W,US/Mountain,1860
# Major US Cities
Birmingham,USA,33°39'N,86°48'W,US/Central,197
Anchorage,USA,61°13'N,149°53'W,US/Alaska,30
Los Angeles,USA,34°03'N,118°15'W,US/Pacific,50
San Francisco,USA,37°46'N,122°25'W,US/Pacific,47
Bridgeport,USA,41°11'N,73°11'W,US/Eastern,13
Wilmington,USA,39°44'N,75°32'W,US/Eastern,15
Jacksonville,USA,30°19'N,81°39'W,US/Eastern,13
Miami,USA,26°8'N,80°12'W,US/Eastern,10
Chicago,USA,41°50'N,87°41'W,US/Central,189
Wichita,USA,37°41'N,97°20'W,US/Central,399
Louisville,USA,38°15'N,85°45'W,US/Eastern,142
New Orleans,USA,29°57'N,90°4'W,US/Central,10
Portland,USA,43°39'N,70°16'W,US/Eastern,6
Baltimore,USA,39°17'N,76°37'W,US/Eastern,31
Detroit,USA,42°19'N,83°2'W,US/Eastern,189
Minneapolis,USA,44°58'N,93°15'W,US/Central,260
Kansas City,USA,39°06'N,94°35'W,US/Central,256
Billings,USA,45°47'N,108°32'W,US/Mountain,946
Omaha,USA,41°15'N,96°0'W,US/Central,299
Las Vegas,USA,36°10'N,115°08'W,US/Pacific,720
Manchester,USA,42°59'N,71°27'W,US/Eastern,56
Newark,USA,40°44'N,74°11'W,US/Eastern,4
Albuquerque,USA,35°06'N,106°36'W,US/Mountain,1523
New York,USA,40°43'N,74°0'W,US/Eastern,17
Charlotte,USA,35°13'N,80°50'W,US/Eastern,217
Fargo,USA,46°52'N,96°47'W,US/Central,271
Cleveland,USA,41°28'N,81°40'W,US/Eastern,210
Philadelphia,USA,39°57'N,75°10'W,US/Eastern,62
Sioux Falls,USA,43°32'N,96°43'W,US/Central,443
Memphis,USA,35°07'N,89°58'W,US/Central,84
Houston,USA,29°45'N,95°22'W,US/Central,8
Dallas,USA,32°47'N,96°48'W,US/Central,137
Burlington,USA,44°28'N,73°9'W,US/Eastern,35
Virginia Beach,USA,36°50'N,76°05'W,US/Eastern,9
Seattle,USA,47°36'N,122°19'W,US/Pacific,63
Milwaukee,USA,43°03'N,87°57'W,US/Central,188
San Diego,USA,32°42'N,117°09'W,US/Pacific,16
Orlando,USA,28°32'N,81°22'W,US/Eastern,35
Buffalo,USA,42°54'N,78°50'W,US/Eastern,188
Toledo,USA,41°39'N,83°34'W,US/Eastern,180
# Canadian cities
Vancouver,Canada,49°15'N,123°6'W,America/Vancouver,55
Calgary,Canada,51°2'N,114°3'W,America/Edmonton,1040
Edmonton,Canada,53°32'N,113°29'W,America/Edmonton,664
Saskatoon,Canada,52°8'N,106°40'W,America/Regina,480
Regina,Canada,50°27'N,104°36'W,America/Regina,577
Winnipeg,Canada,49°53'N,97°8'W,America/Winnipeg,229
Toronto,Canada,43°39'N,79°22'W,America/Toronto,77
Montreal,Canada,45°30'N,73°33'W,America/Montreal,23
Quebec,Canada,46°48'N,71°14'W,America/Toronto,87
Fredericton,Canada,45°57'N,66°38'W,America/Halifax,8
Halifax,Canada,44°38'N,63°34'W,America/Halifax,36
Charlottetown,Canada,46°14'N,63°7'W,America/Halifax,2
St. John's,Canada,47°33'N,52°42'W,America/Halifax,116
Whitehorse,Canada,60°43'N,135°3'W,America/Whitehorse,696
Yellowknife,Canada,62°27'N,114°22'W,America/Yellowknife,191
Iqaluit,Canada,63°44'N,68°31'W,America/Iqaluit,3
"""
class AstralError(Exception):
"""Astral base exception class"""
def excel_datediff(start_date, end_date):
"""Return the same number of days between 2 dates as Excel does"""
return end_date.toordinal() - start_date.toordinal() + 2
class Location(object):
"""Provides access to information for single location."""
def __init__(self, info=None):
"""Initializes the object with a tuple of information.
:param info: A tuple of information to fill in the location info.
The tuple should contain items in the following order
================ =============
Field Default
================ =============
name Greenwich
region England
latitude 51.168
longitude 0
time zone name Europe/London
elevation 24
================ =============
See :attr:`timezone` property for a method of obtaining time zone
names
"""
self.astral = None
if info is None:
self.name = "Greenwich"
self.region = "England"
self._latitude = 51.168
self._longitude = 0.0
self._timezone_group = "Europe"
self._timezone_location = "London"
self._elevation = 24
else:
self.name = ""
self.region = ""
self._latitude = 0.0
self._longitude = 0.0
self._timezone_group = ""
self._timezone_location = ""
self._elevation = 0
try:
self.name = info[0]
self.region = info[1]
self.latitude = info[2]
self.longitude = info[3]
self.timezone = info[4]
self.elevation = info[5]
except IndexError:
pass
self.url = ""
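# Construction sketch (illustrative; the values follow the field order documented
# in the __init__ docstring and mirror the London entry in the city table above):
#
#   info = ("London", "England", "51°30'N", "00°07'W", "Europe/London", 24)
#   london = Location(info)
#   # london.latitude == 51.5, london.longitude == -0.1166..., london.timezone == "Europe/London"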
def __repr__(self):
if self.region:
_repr = "%s/%s" % (self.name, self.region)
else:
_repr = self.name
repr_format = "%s, tz=%s, lat=%0.02f, lon=%0.02f"
return repr_format % (_repr, self.timezone, self.latitude, self.longitude)
@property
def latitude(self):
"""The location's latitude
``latitude`` can be set either as a string or as a number
For strings they must be of the form
degrees°minutes'[N|S] e.g. 51°31'N
For numbers, positive numbers signify latitudes to the North.
"""
return self._latitude
@latitude.setter
def latitude(self, latitude):
if isinstance(latitude, str) or isinstance(latitude, ustr):
(deg, rest) = latitude.split("°", 1)
(minute, rest) = rest.split("'", 1)
self._latitude = float(deg) + (float(minute) / 60)
if latitude.endswith("S"):
self._latitude = -self._latitude
else:
self._latitude = float(latitude)
@property
def longitude(self):
"""The location's longitude.
``longitude`` can be set either as a string or as a number
For strings they must be of the form
degrees°minutes'[E|W] e.g. 51°31'W
For numbers, positive numbers signify longitudes to the East.
"""
return self._longitude
@longitude.setter
def longitude(self, longitude):
if isinstance(longitude, str) or isinstance(longitude, ustr):
(deg, rest) = longitude.split("°", 1)
(minute, rest) = rest.split("'", 1)
self._longitude = float(deg) + (float(minute) / 60)
if longitude.endswith("W"):
self._longitude = -self._longitude
else:
self._longitude = float(longitude)
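# Usage sketch (assuming a Location instance named `loc`): strings follow the
# degrees°minutes'[N|S] / [E|W] form described in the docstrings above, while
# plain numbers are stored unchanged.
#
#   loc.latitude = "51°31'N"    # parsed to 51 + 31/60 = 51.5166...
#   loc.longitude = "00°07'W"   # westerly longitudes become negative: -0.1166...
#   loc.latitude = -33.87       # numeric values: negative means south of the equator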
@property
def elevation(self):
"""The elevation in metres above sea level."""
return self._elevation
@elevation.setter
def elevation(self, elevation):
self._elevation = int(elevation)
@property
def timezone(self):
"""The name of the time zone for the location.
A list of time zone names can be obtained from pytz. For example.
>>> from pytz import all_timezones
>>> for timezone in all_timezones:
... print(timezone)
"""
if self._timezone_location != "":
return "%s/%s" % (self._timezone_group, self._timezone_location)
else:
return self._timezone_group
@timezone.setter
def timezone(self, name):
if name not in pytz.all_timezones:
raise ValueError("Timezone '%s' not recognized" % name)
try:
self._timezone_group, self._timezone_location = name.split("/", 1)
except ValueError:
self._timezone_group = name
self._timezone_location = ""
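# Usage sketch (assuming a Location instance named `loc`):
#
#   loc.timezone = "Europe/London"   # stored as group "Europe", location "London"
#   loc.timezone = "UTC"             # single-part names leave the location part empty
#   loc.timezone = "Not/AZone"       # raises ValueError: not in pytz.all_timezones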
@property
def tz(self):
"""Time zone information."""
try:
tz = pytz.timezone(self.timezone)
return tz
except pytz.UnknownTimeZoneError:
raise AstralError("Unknown timezone '%s'" % self.timezone)
tzinfo = tz
@property
def solar_depression(self):
"""The number of degrees the sun must be below the horizon for the
dawn/dusk calculation.
Can either be set as a number of degrees below the horizon or as
one of the following strings
============= =======
String Degrees
============= =======
civil 6.0
nautical 12.0
astronomical 18.0
============= =======
"""
return self.astral.solar_depression
@solar_depression.setter
def solar_depression(self, depression):
if self.astral is None:
self.astral = Astral()
self.astral.solar_depression = depression
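# Usage sketch (assuming a Location instance named `loc`): either form from the
# table above is accepted; string names are translated to degrees by the
# underlying Astral object.
#
#   loc.solar_depression = "civil"   # 6 degrees below the horizon
#   loc.solar_depression = 18.0      # explicit astronomical twilight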
def sun(self, date=None, local=True):
"""Returns dawn, sunrise, noon, sunset and dusk as a dictionary.
:param date: The date for which to calculate the times.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:returns: Dictionary with keys ``dawn``, ``sunrise``, ``noon``,
``sunset`` and ``dusk`` whose values are the results of the
corresponding methods.
:rtype: dict
"""
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
sun = self.astral.sun_utc(date, self.latitude, self.longitude)
if local:
for key, dt in sun.items():
#
# Basic graphics functions for the Kindle HUD
#
import png
import urllib
import math
import os
import datetime
import time
screenWidth = 800
screenHeight = 600
ScreenNumPixels = screenWidth * screenHeight
screenArray = bytearray(b'\xFF' * ScreenNumPixels)
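# The buffer is stored sideways relative to the landscape view: a landscape pixel
# (x, y) lives at index (screenWidth - x - 1) * screenHeight + y, which is the
# addressing used by Font.BlitCharacter and Icons.Draw below. A minimal sketch
# (the helper name SetPixel is illustrative, not part of this module):
#
#   def SetPixel(x, y, grey):
#       screenArray[(screenWidth - x - 1) * screenHeight + y] = grey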
# The Font class handles rendering of bitmapped fonts to the screen image.
# Because the Kindle is to be used in Landscape mode, the screen is sideways,
# so the Font class is designed to render fonts sideways.
#
# The Font class handles kerning.
#
# Font files are raw 8-bit greyscale bitmaps, with each character laid out
# in a single long row, with at least 1 column of white (255) separating each
# character.
# The first column must be completely black, the second column completely white.
# This defines the height of the font. Every character from ! to ~ must be
# present, in ASCII order.
#
# The whole image must be mirrored then rotated 90 deg anticlockwise.
#
# Example:
# trebuchet_28px = Font('Trebuchet_28px.raw')
# trebuchet_28px.Print("Hello world", 10, 10)
#
class Font:
def ColumnIsBlank(self, column):
for i in range(column*self.fontHeight, (column+1)*self.fontHeight):
if (self.fontArray[i] != 255):
return 0
return 1
def ReadKerningLeft(self, startColumn, maxKern):
kerningLeft = []
#print(startColumn)
for y in range(0,self.fontHeight):
#print("y", y)
pixelAddress = startColumn*self.fontHeight + y
whiteSpace = 0
while (self.fontArray[pixelAddress] > 196) & (self.fontArray[pixelAddress] != 254) & (whiteSpace<maxKern):
#print(whiteSpace,pixelAddress)
whiteSpace += 1
pixelAddress += self.fontHeight
kerningLeft.append(whiteSpace)
#print("kerning L", kerningLeft)
return kerningLeft
def ReadKerningRight(self, startColumn, maxKern):
kerningRight = []
for y in range(0,self.fontHeight):
#print("y", y)
pixelAddress = startColumn*self.fontHeight + y
whiteSpace = 0
while (self.fontArray[pixelAddress] > 196) & (self.fontArray[pixelAddress] != 254) & (whiteSpace<maxKern):
#print(whiteSpace,pixelAddress)
whiteSpace += 1
pixelAddress -= self.fontHeight
kerningRight.append(whiteSpace)
#print("kerning R", kerningRight)
return kerningRight
def __init__(self, fontFileName): # Font must contain black pixels in column 0, white pixels in column 1, and be rotated 90deg clockwise
self.fontArray = bytearray(1)
self.fontHeight = 0
self.numColumns = 0
self.characters = []
self.fontFile = open(fontFileName, 'rb') # Load up the font file, and work out how tall the font is based on the black line at column 0
self.fontArray = bytearray(self.fontFile.read())
for i in range(0, 300):
if self.fontArray[i] == 0:
self.fontHeight += 1
else:
break
self.interCharacterPixels = int(self.fontHeight / 8)
self.spacePixels = int(self.fontHeight / 3)
numColumns = int(len(self.fontArray) / self.fontHeight)
print(fontFileName)
print(self.fontHeight)
print(numColumns)
column = 1
charactersLeft = True
#for i in range (0,10):
while True:
#character = [0,
characterWidth = 0
while self.ColumnIsBlank(column):
column += 1
if column >= numColumns:
break
if column >= numColumns:
break
startColumn = column
c1 = column*self.fontHeight
while self.ColumnIsBlank(column) == False:
column += 1
characterWidth += 1
endColumn = column
c2 = column*self.fontHeight
kerningLeft = self.ReadKerningLeft( startColumn, endColumn-startColumn)
kerningRight = self.ReadKerningRight( endColumn-1, endColumn-startColumn)
characterPixels = self.fontArray[c1:c2]
character = [self.fontHeight, characterWidth, characterPixels, kerningLeft, kerningRight] # height, width, pixelData, leftKern, rightKern
self.characters.append(character)
#print(characterWidth,"wide")
#print(len(character),"pixels")
#print("[",c1,":",c2,"]")
#print(len(self.characters), "characters read")
#self.ReadKerningRight(233,6)
def BlitCharacter(self, character, x,y):
characterHeight = character[0]
characterWidth = character[1]
if (x+characterWidth) > 800:
return
startScreenPixelAddress = (screenWidth-x-1)*screenHeight + y
endScreenPixelAddress = startScreenPixelAddress + characterHeight
startCharacterPixelAddress = 0
endCharacterPixelAddress = characterHeight
for i in range(0, characterWidth):
k = startCharacterPixelAddress
for j in range(startScreenPixelAddress,endScreenPixelAddress):
if character[2][k] < 255:
screenArray[j] = character[2][k]
k += 1
#screenArray[startScreenPixelAddress:endScreenPixelAddress] -= character[2][startCharacterPixelAddress:endCharacterPixelAddress]
startCharacterPixelAddress += characterHeight
endCharacterPixelAddress += characterHeight
startScreenPixelAddress -= screenHeight
endScreenPixelAddress -= screenHeight
def CalcKerning(self, prev, this):
if prev == '':
return 0
prevChar = self.characters[ord(prev)-ord('!')]
thisChar = self.characters[ord(this)-ord('!')]
kerningRight = prevChar[4]
kerningLeft = thisChar[3]
minSpan = 99
for i in range(0, len(kerningLeft)):
span = kerningLeft[i] + kerningRight[i]
if span < minSpan:
minSpan = span
jump = self.interCharacterPixels + prevChar[1] - minSpan
return jump
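# Worked example (illustrative numbers): if the previous glyph is 12px wide,
# interCharacterPixels is 3, and the smallest combined whitespace between the
# previous glyph's right edge and this glyph's left edge across all rows is 4px,
# the pen advances 3 + 12 - 4 = 11px instead of the unkerned 15px.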
def Print(self, textString, x, y):
if y+self.fontHeight >= 600:
return
prev = ''
prevWidth = 0
for c in textString:
charNum = ord(c)-ord('!')
character = self.characters[charNum]
if c != ' ':
if (charNum>=0) & (charNum<len(self.characters)):
x += self.CalcKerning(prev, c)
self.BlitCharacter(character,x,y)
prev = c
prevWidth = character[1]
else:
prev = ''
x += prevWidth + self.spacePixels
def CalcWidth(self, textString):
prev = ''
prevWidth = 0
x = 0
for c in textString:
charNum = ord(c)-ord('!')
character = self.characters[charNum]
if c != ' ':
if (charNum>=0) & (charNum<len(self.characters)):
x += self.CalcKerning(prev, c)
#self.BlitCharacter(character,x,y)
prev = c
prevWidth = character[1]
else:
prev = ''
x += prevWidth + self.spacePixels
return x+character[1]
def PrintCentred(self, textString, x, y):
width = self.CalcWidth(textString)
self.Print(textString, x - int(width/2), y)
def PrintRightJus(self, textString, x, y):
width = self.CalcWidth(textString)
self.Print(textString, x - width, y)
def PrintBlock(self, textString, x, y, width):
words = textString.split()
xPos = x
yPos = y
maxX = x+width
justCR = False
for word in words:
wordWidth = self.CalcWidth(word)
if (xPos+wordWidth) > maxX:
xPos = x
yPos += self.fontHeight
self.Print(word, xPos, yPos)
xPos += wordWidth+self.spacePixels
return yPos+self.fontHeight
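# PrintBlock wraps words within `width` pixels and returns the y coordinate just
# below the last line, so blocks can be stacked. A sketch (the font object name
# is illustrative):
#
#   y = trebuchet_17px.PrintBlock("a longer sentence that wraps", 10, 10, 200)
#   trebuchet_17px.PrintBlock("next paragraph", 10, y, 200)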
# Icons class is similar to the Font class, but only renders single icons, not
# strings of characters.
#
class Icons:
def __init__(self, iconsFileName): # Image must contain black pixels in column 0, white pixels in column 1, and be rotated 90 deg clockwise
print(iconsFileName)
self.iconArray = bytearray(1)
self.iconHeight = 0
self.numColumns = 0
self.icons = []
self.iconFile = open(iconsFileName, 'rb') # Load up the font file, and work out how tall the font is based on the black line at column 0
self.iconArray = bytearray(self.iconFile.read())
for i in range(0, 300): # Measure height of initial black line (max 300 pixels)
if self.iconArray[i] == 0:
self.iconHeight += 1
else:
break
self.numIcons = int(((len(self.iconArray)-1) / self.iconHeight) / self.iconHeight)
#print(len(self.iconArray))
print(self.iconHeight)
print("%s icons" % self.numIcons)
def Draw(self, iconNumber, x, y):
if (iconNumber<0) | (iconNumber>=self.numIcons):
return
startIconAddress = ((iconNumber*self.iconHeight) + 1) *self.iconHeight
endIconAddress = startIconAddress + self.iconHeight
startScreenPixelAddress = (screenWidth-x-1)*screenHeight + y
endScreenPixelAddress = startScreenPixelAddress + self.iconHeight
for i in range(0, self.iconHeight):
#print(i, startScreenPixelAddress, endScreenPixelAddress, startIconAddress, endIconAddress)
screenArray[startScreenPixelAddress:endScreenPixelAddress] = self.iconArray[startIconAddress:endIconAddress]
#screenArray[startScreenPixelAddress] = 0
#screenArray[endScreenPixelAddress] = 0
startScreenPixelAddress -= screenHeight
endScreenPixelAddress -= screenHeight
startIconAddress += self.iconHeight
endIconAddress += self.iconHeight
def DrawCentred(self, iconNumber, x, y):
self.Draw(iconNumber, x - int(self.iconHeight/2), y)
# Save the screen image to disk as a raw 8-bit bitmap.
# This can then be written to the screen using the 'eips' command.
#
def SaveImage():
with open("test_screen.raw", 'wb') as output:
output.write(screenArray)
def WriteImageToKindleScreen(filename):
today = str(datetime.datetime.today())
print(today)
print("Filename: " + filename)
f = open(filename, 'wb')
w = png.Writer(screenHeight, screenWidth, greyscale=True)
w.write_array(f, screenArray)
f.close()
os.system("eips -c -g " + filename)
# Load up all the fonts and icons
trebuchet_37px = Font('Trebuchet_37px.raw')
trebuchet_28px = Font('Trebuchet_28px.raw')
trebuchet_17px = Font('Trebuchet_17px.raw')
trebuchet_17px_Bold = Font('Trebuchet_17px_Bold.raw')
trebuchet_11px = Font('Trebuchet_11px.raw')
numbers_103px = Font('Large_Numbers.raw')
weatherooLarge = Icons('Icons_Large.raw')
weatherooSmall = Icons('Icons_Small.raw')
trafficIcons = Icons('Traffic_Icons.raw')
extrasIcons = Icons('Extras_Icons_01.raw')
batteryIcons = Font('Batteries.raw')
# Fetch the rain radar image from Wunderground
# and combine it with an image of the map of London.
#
# Fixme: The fact that it uses a bitmap of the map of
# London makes this a bit un-portable. It would be
# nice to find some way to get it to auto-generate the
# map. But it's non-trivial because you really need a
# nice simple map, with only a few details on it. Where
# can you get that from?
#
def ReadRainRadar():
print "ReadRainRadar()"
try:
rain = png.Reader(file=urllib.urlopen('http://api.wunderground.com/api/f912ab4e1aac3427/radar/image.png?minlat=51.189549&maxlat=51.835226&minlon=-0.820341&maxlon=0.615710&width=380&height=277&newmaps=0'))
mapFile = open('London_Map_380x277.png', 'rb')
mapPNG = png.Reader(mapFile)
pngInfo = rain.read()
palette = rain.palette()
xSize = pngInfo[0]
ySize = pngInfo[1]
iterator = pngInfo[2]
rainPixelData = list(iterator)
mapPngInfo = mapPNG.read()
iterator2 = mapPngInfo[2]
mapPixelData = list(iterator2)
print "rendering"
for x in range(0, xSize):
p = (xSize-x+10)*screenHeight + 313
for y in range(0, ySize):
rainPixel = palette[rainPixelData[y][x]]
r = rainPixel[0]
g = rainPixel[1]
b = rainPixel[2]
a = rainPixel[3]
if a < 255:
g = 255
else:
if g<0:
g=0
screenArray[p] = ((g) * mapPixelData[y][x]) / 256
p += 1
except:
print "Rain radar Failed"
def trunc(x): # Return the integer part of a number
return int(x)
def frac(x):
return x - int(x)
def invfrac(x):
return 1 - (x-int(x))
def abs(x):
if x < 0:
return -x
else:
return x
transformed=False, **kw):
"""None <- writePDBQS( nodes, filename=None, sort=True,
pdbRec=['ATOM', 'HETATM', 'CONECT'],
bondOrigin=('File', 'UserDefined'), ssOrigin=None, **kw)
\nRequired Argument:\n
nodes --- TreeNodeSet holding the current selection
\nOptional Arguments:\n
filename --- name of the PDB file (default=None). If None is given
The name of the molecule plus the .pdb extension will be used\n
sort --- Boolean flag to either sort or not the nodes before writing
the PDB file (default=True)\n
pdbRec --- List of the PDB Record to save in the PDB file.
(default: ['ATOM', 'HETATM', 'CONECT'])\n
bondOrigin --- string or list specifying the origin of the bonds to save
as CONECT records in the PDB file. Can be 'all' or a tuple\n
ssOrigin --- Flag to specify the origin of the secondary structure
information to be saved in the HELIX, SHEET and TURN
record. Can either be None, File, PROSS or Stride.
"""
kw['sort'] = sort
kw['bondOrigin'] = bondOrigin
kw['ssOrigin'] = ssOrigin
kw['filename'] = filename
kw['transformed'] = transformed
apply(self.doitWrapper, (nodes,), kw)
def guiCallback(self):
# Get the current selection
nodes = self.vf.getSelection()
if not len(nodes): return None
molecules, nodes = self.vf.getNodesByMolecule(nodes)
# Make sure that the current selection only belong to 1 molecule
if len(molecules)>1:
self.warningMsg("ERROR: Cannot create the PDBQS file.\n\
The current selection contains more than one molecule.")
return
self.mol = molecules[0]
self.nodes = nodes[0]
self.title = "Write PDBQS file"
self.fileType = [('PDBQS file', '*.pdbqs')]
currentPath = os.getcwd()
self.defaultFilename = os.path.join(currentPath, self.mol.name) + '.pdbqs'
val = self.showForm('saveOptions', force=1,cancelCfg={'text':'Cancel', 'command':self.dismiss_cb},
okCfg={'text':'OK', 'command':self.dismiss_cb})
if val:
del val['avRec']
if val.has_key('filebrowse'): del val['filebrowse']
ebn = self.cmdForms['saveOptions'].descr.entryByName
w = ebn['pdbRec']['widget']
val['pdbRec'] = w.get()
if len(val['pdbRec'])==0: return
apply(self.doitWrapper, (self.nodes,), val)
pdbqsWriterGuiDescr = {'widgetType':'Menu', 'menyBarName':'menuRoot',
'menuButtonName':'File',
'menyEntryLabel':'Write PDBQS ...',
'index':0}
from Pmv.fileCommandsGUI import PDBQSWriterGUI
class PDBQTWriter(PDBWriter):
"""Command to write PDBQT files using a PDBQT spec compliant writer
\nPackage : Pmv
\nModule : fileCommands
\nClass : PDBQTWriter
\nCommand : write PDBQT
\nSynopsis:\n
None <--- writePDBQT( nodes, filename=None, sort=True,
pdbRec=['ATOM', 'HETATM', 'CONECT'],
bondOrigin=('File', 'UserDefined'), ssOrigin=None, **kw)\n
\nRequired argument:\n
nodes --- TreeNodeSet holding the current selection
\nOptional arguments:\n
filename --- name of the PDB file (default=None). If None is given
The name of the molecule plus the .pdb extension will be used\n
sort --- Boolean flag to either sort or not the nodes before writing
the PDB file (default=True)\n
pdbRec --- List of the PDB Record to save in the PDB file.
(default: ['ATOM', 'HETATM', 'CONECT'])\n
bondOrigin --- string or list specifying the origin of the bonds to save
as CONECT records in the PDB file. Can be 'all' or a tuple\n
ssOrigin --- Flag to specify the origin of the secondary structure
information to be saved in the HELIX, SHEET and TURN
record. Can either be None, File, PROSS or Stride.
"""
# def logString(self, *args, **kw):
# """return None as log string as we don't want to log this
#"""
# pass
def doit(self, nodes, filename=None, sort=True, pdbRec = ['ATOM', 'HETATM', 'CONECT'],
bondOrigin=('File', 'UserDefined'), ssOrigin=None, transformed=False):
if bondOrigin == 0:
bondOrigin = ('File', 'UserDefined')
elif bondOrigin == 1:
bondOrigin = 'all'
nodes = self.vf.expandNodes(nodes)
molecules = nodes.top.uniq()
if len(molecules)==0: return 'ERROR'
# Cannot save multiple molecules in one PDB file. They need to be merged into one molecule
# first
if len(molecules)>1:
self.warningMsg("ERROR: Cannot create the PDBQT file, the current selection contains more than one molecule")
return 'ERROR'
mol = molecules[0]
# Cannot save a PDBQT file if the atoms do not all have a charge assigned.
allAtoms = nodes.findType(Atom)
try:
allAtoms.charge
except:
try:
allAtoms.charge=allAtoms.gast_charge
except:
self.warningMsg('ERROR: Cannot create the PDBQT file, all atoms in the current selection do not have a charge field. Use the commands in the editCommands module to either assign Kollman charges or compute Gasteiger charges')
return 'ERROR'
try:
allAtoms.autodock_element
except:
self.warningMsg('ERROR: Cannot create the PDBQT file, all atoms do not have an autodock_element field')
return 'ERROR'
if filename is None:
filename = './%s.pdbqt'%mol.name
if transformed:
oldConf = mol.allAtoms[0].conformation
self.setNewCoords(mol)
writer = PdbqtWriter()
writer.write(filename, nodes, sort=sort, records=pdbRec,
bondOrigin=bondOrigin, ssOrigin=ssOrigin)
if transformed:
mol.allAtoms.setConformation(oldConf)
def __call__(self, nodes, filename=None, sort=True, pdbRec = ['ATOM', 'HETATM', 'CONECT'],
bondOrigin=('File', 'UserDefined'), ssOrigin=None, transformed=False, **kw):
"""None <- writePDBQT( nodes, filename=None, sort=True,
pdbRec=['ATOM', 'HETATM', 'CONECT'],
bondOrigin=('File', 'UserDefined'), ssOrigin=None, **kw)\n
\nRequired Argument:\n
nodes --- TreeNodeSet holding the current selection
\nOptional Arguments:\n
filename --- name of the PDB file (default=None). If None is given
The name of the molecule plus the .pdb extension will be used\n
sort --- Boolean flag to either sort or not the nodes before writing
the PDB file (default=True)\n
pdbRec --- List of the PDB Record to save in the PDB file.
(default: ['ATOM', 'HETATM', 'CONECT'])\n
bondOrigin --- string or list specifying the origin of the bonds to save
as CONECT records in the PDB file. Can be 'all' or a tuple\n
ssOrigin --- Flag to specify the origin of the secondary structure
information to be saved in the HELIX, SHEET and TURN
record. Can either be None, File, PROSS or Stride.
"""
kw['sort'] = sort
kw['bondOrigin'] = bondOrigin
kw['ssOrigin'] = ssOrigin
kw['filename'] = filename
kw['transformed'] = transformed
apply(self.doitWrapper, (nodes,), kw)
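# Usage sketch from a Pmv session (the viewer handle `mv` and the path are
# illustrative; as doit() checks above, the selection must already carry charges
# (Kollman or Gasteiger) and autodock_element fields):
#
#   mv.writePDBQT(mv.getSelection(), filename='/tmp/ligand.pdbqt',
#                 pdbRec=['ATOM', 'HETATM', 'CONECT'], sort=True)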
def guiCallback(self):
# Get the current selection
nodes = self.vf.getSelection()
if not len(nodes): return None
molecules, nodes = self.vf.getNodesByMolecule(nodes)
# Make sure that the current selection only belong to 1 molecule
if len(molecules)>1:
self.warningMsg("ERROR: Cannot create the PDBQT file.\n\
The current selection contains more than one molecule.")
return
self.mol = molecules[0]
self.nodes = nodes[0]
self.title = "Write PDBQT file"
self.fileType = [('PDBQT file', '*.pdbqt')]
currentPath = os.getcwd()
self.defaultFilename = os.path.join(currentPath, self.mol.name) + '.pdbqt'
val = self.showForm('saveOptions', force=1,cancelCfg={'text':'Cancel', 'command':self.dismiss_cb},
okCfg={'text':'OK', 'command':self.dismiss_cb})
if val:
del val['avRec']
if val.has_key('filebrowse'): del val['filebrowse']
ebn = self.cmdForms['saveOptions'].descr.entryByName
w = ebn['pdbRec']['widget']
val['pdbRec'] = w.get()
if len(val['pdbRec'])==0: return
apply(self.doitWrapper, (self.nodes,), val)
pdbqtWriterGuiDescr = {'widgetType':'Menu', 'menyBarName':'menuRoot',
'menuButtonName':'File',
'menyEntryLabel':'Write PDBQT ...',
'index':0}
from Pmv.fileCommandsGUI import PDBQTWriterGUI
class SaveMMCIF(MVCommand):
"""This command writes macromolecular Crystallographic Information File (mmCIF).
\nPackage : Pmv
\nModule : fileCommands
\nClass : SaveMMCIF
\nCommand : saveMMCIF
\nSynopsis:\n
None<---saveMMCIF(filename, nodes)
\nRequired Arguments:\n
filename --- name of the mmcif file (default=None). If None is given
The name of the molecule plus the .cif extension will be used\n
nodes --- TreeNodeSet holding the current selection
"""
# def logString(self, *args, **kw):
# """return None as log string as we don't want to log this
#"""
# pass
def doit(self, filename, nodes):
nodes = self.vf.expandNodes(nodes)
writer = MMCIFWriter()
writer.write(filename, nodes)
def __call__(self, filename, nodes,**kw):
"""None<---saveMMCIF(filename,nodes)
\nfilename --- name of the mmcif file (default=None). If None is given
The name of the molecule plus the .cif extension will be used\n
\nnodes --- TreeNodeSet holding the current selection
"""
nodes = self.vf.expandNodes(nodes)
apply(self.doitWrapper, (filename, nodes), kw)
def guiCallback(self):
filename = self.vf.askFileSave(types=[('MMCIF files', '*.cif')],
title="Write MMCIF File:")
if not filename: return
nodes = self.vf.getSelection()
if not len(nodes) : return None
if len(nodes.top.uniq())>1:
self.warningMsg("more than one molecule in selection")
return
self.doitWrapper(filename, nodes)
from Pmv.fileCommandsGUI import SaveMMCIFGUI
class STLWriter(MVCommand):
"""Command to write coords&normals of currently displayed geometries as
ASCII STL files (STL = stereolithography, don't confuse with standard
template library)
\nPackage : Pmv
\nModule : fileCommands
\nClass : STLWriter
\nCommand : writeSTL
\nSynopsis:\n
None<---Write STL ( filename, sphereQuality=2,
cylinderQuality=10 )
"""
# def logString(self, *args, **kw):
# """return None as log string as we don't want to log this
#"""
# pass
def doit(self, filename, sphereQuality=0,
cylinderQuality=0):
"""Write all displayed geoms of all displayed molecules in
the STL format"""
from DejaVu.DataOutput import OutputSTL
STL = OutputSTL()
stl = STL.getSTL(self.vf.GUI.VIEWER.rootObject, filename,
sphereQuality=sphereQuality,
cylinderQuality=cylinderQuality)
if stl is not None and len(stl) != 0:
f = open(filename, 'w')
map( lambda x,f=f: f.write(x), stl)
f.close()
def __call__(self, filename, **kw):
"""None <--- Write STL ( filename, sphereQuality=0,
cylinderQuality=0 )"""
apply( self.doitWrapper, (filename,), kw )
def guiCallback(self):
newfile = self.vf.askFileSave(types=[('STL files', '*.stl'),],
title='Select STL files:')
if not newfile or newfile == '':
return
from DejaVu.DataOutput import STLGUI
opPanel = STLGUI(master=None, title='STL Options')
opPanel.displayPanel(create=1)
if not opPanel.readyToRun:
return
# get values from options panel
di = opPanel.getValues()
sq =
= log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190417 - Feature #2950: Report defaulted settings to log
# Added all the globally declared settings to enable reporting in the
# log the state of each setting.
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
logger.info('SERVER_METRIC_PATH is set from settings.py to %s' % str(SERVER_METRIC_PATH))
except:
SERVER_METRIC_PATH = ''
logger.info('warning :: SERVER_METRIC_PATH is not declared in settings.py, defaults to \'\'')
logger.info('skyline_app_graphite_namespace is set to %s' % str(skyline_app_graphite_namespace))
try:
BOUNDARY_METRICS = settings.BOUNDARY_METRICS
boundary_metrics_count = len(BOUNDARY_METRICS)
logger.info('BOUNDARY_METRICS is set from settings.py with %s Boundary metrics' % str(boundary_metrics_count))
except:
BOUNDARY_METRICS = []
logger.info('warning :: BOUNDARY_METRICS is not declared in settings.py, defaults to []')
try:
ENABLE_BOUNDARY_DEBUG = settings.ENABLE_BOUNDARY_DEBUG
logger.info('ENABLE_BOUNDARY_DEBUG is set from settings.py to %s' % str(ENABLE_BOUNDARY_DEBUG))
except:
logger.info('warning :: ENABLE_BOUNDARY_DEBUG is not declared in settings.py, defaults to False')
ENABLE_BOUNDARY_DEBUG = False
try:
BOUNDARY_AUTOAGGRERATION = settings.BOUNDARY_AUTOAGGRERATION
logger.info('BOUNDARY_AUTOAGGRERATION is set from settings.py to %s' % str(BOUNDARY_AUTOAGGRERATION))
except:
BOUNDARY_AUTOAGGRERATION = False
logger.info('warning :: BOUNDARY_AUTOAGGRERATION is not declared in settings.py, defaults to False')
try:
BOUNDARY_AUTOAGGRERATION_METRICS = settings.BOUNDARY_AUTOAGGRERATION_METRICS
logger.info('BOUNDARY_AUTOAGGRERATION_METRICS is set from settings.py')
except:
BOUNDARY_AUTOAGGRERATION_METRICS = (
("auotaggeration_metrics_not_declared", 60)
)
logger.info('warning :: BOUNDARY_AUTOAGGRERATION_METRICS is not declared in settings.py, defaults to %s' % (
str(BOUNDARY_AUTOAGGRERATION_METRICS[0])))
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.error('error :: skyline cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
continue
# Report app up
self.redis_conn.setex(skyline_app, 120, now)
# Discover unique metrics
unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
if len(unique_metrics) == 0:
logger.info('no metrics in redis. try adding some - see README')
sleep(10)
continue
# Reset boundary_metrics
boundary_metrics = []
# Build boundary metrics
for metric_name in unique_metrics:
for metric in BOUNDARY_METRICS:
CHECK_MATCH_PATTERN = metric[0]
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
pattern_match = check_match_pattern.match(base_name)
if pattern_match:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: boundary metric - pattern MATCHED - ' + metric[0] + " | " + base_name)
boundary_metrics.append([metric_name, metric[1]])
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: boundary metrics - ' + str(boundary_metrics))
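# Illustrative example (the pattern and algorithm name are hypothetical): with
#   BOUNDARY_METRICS = (('stats\.web\.requests', 'detect_drop_off_cliff', ...),)
# any Redis metric whose base name (FULL_NAMESPACE stripped) starts with
# 'stats.web.requests' is matched by the compiled pattern above and queued as
# [metric_name, 'detect_drop_off_cliff'] for the spin_process workers.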
if len(boundary_metrics) == 0:
logger.info('no Boundary metrics in redis. try adding some - see README')
sleep(10)
continue
# @added 20171216 - Task #2236: Change Boundary to only send to Panorama on alert
# Pass added_at as an argument to spin_process so that the panaroma_anomaly_file
# can be moved from SKYLINE_TMP_DIR to the PANORAMA_CHECK_PATH
added_at = str(int(time()))
# Spawn processes
pids = []
for i in range(1, settings.BOUNDARY_PROCESSES + 1):
if i > len(boundary_metrics):
logger.info('WARNING: Skyline Boundary is set for more cores than needed.')
break
# @modified 20171216 - Task #2236: Change Boundary to only send to Panorama on alert
# Pass added_at as an argument to spin_process so that the panaroma_anomaly_file
# can be moved from SKYLINE_TMP_DIR to the PANORAMA_CHECK_PATH
# p = Process(target=self.spin_process, args=(i, boundary_metrics))
p = Process(target=self.spin_process, args=(i, boundary_metrics, added_at))
pids.append(p)
p.start()
# Send wait signal to zombie processes
for p in pids:
p.join()
# Grab data from the queue and populate dictionaries
exceptions = dict()
anomaly_breakdown = dict()
while 1:
try:
key, value = self.anomaly_breakdown_q.get_nowait()
if key not in anomaly_breakdown.keys():
anomaly_breakdown[key] = value
else:
anomaly_breakdown[key] += value
except Empty:
break
while 1:
try:
key, value = self.exceptions_q.get_nowait()
if key not in exceptions.keys():
exceptions[key] = value
else:
exceptions[key] += value
except Empty:
break
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager() list
boundary_not_anomalous_metrics = []
try:
literal_boundary_not_anomalous_metrics = list(self.redis_conn.smembers('boundary.not_anomalous_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate list from Redis set boundary.not_anomalous_metrics')
literal_boundary_not_anomalous_metrics = []
for metric_list_string in literal_boundary_not_anomalous_metrics:
metric = literal_eval(metric_list_string)
boundary_not_anomalous_metrics.append(metric)
# @added 20171214 - Bug #2232: Expiry boundary last_seen keys appropriately
# Expire keys
if settings.BOUNDARY_ENABLE_ALERTS:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# for not_anomalous_metric in self.not_anomalous_metrics:
for not_anomalous_metric in boundary_not_anomalous_metrics:
metric_name = not_anomalous_metric[1]
base_name = metric_name.replace(FULL_NAMESPACE, '', 1)
algorithm = not_anomalous_metric[8]
if ENABLE_BOUNDARY_DEBUG:
logger.info("debug :: not_anomalous_metric - " + str(not_anomalous_metric))
anomaly_cache_key_expiration_time = 1
# @modified 20171228 - Task #2236: Change Boundary to only send to Panorama on alert
# Wrapped in try - Added algorithm as it is required if the metric has
# multiple rules covering a number of algorithms
try:
anomaly_cache_key = 'anomaly_seen.%s.%s' % (algorithm, base_name)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: anomaly_cache_key - anomaly_seen.%s.%s' % (algorithm, base_name))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to determine string for anomaly_cache_key')
anomaly_cache_key = 'anomaly_seen.%s' % (base_name)
times_seen = 0
try:
self.redis_conn.setex(anomaly_cache_key, anomaly_cache_key_expiration_time, packb(int(times_seen)))
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: redis - anomaly_cache_key set OK - %s' % str(anomaly_cache_key))
except:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: redis failed - anomaly_cache_key set failed - %s' % str(anomaly_cache_key))
# @added 20171216 - Task #2236: Change Boundary to only send to Panorama on alert
# Remove tmp_panaroma_anomaly_file
# @modified 20171228 - Task #2236: Change Boundary to only send to Panorama on alert
# Added algorithm as it is required if the metric has
# multiple rules covering a number of algorithms
tmp_panaroma_anomaly_file = '%s/%s.%s.%s.panorama_anomaly.txt' % (
settings.SKYLINE_TMP_DIR, added_at, algorithm, base_name)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: set tmp_panaroma_anomaly_file to - %s' % (str(tmp_panaroma_anomaly_file)))
if os.path.isfile(tmp_panaroma_anomaly_file):
try:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: removing tmp_panaroma_anomaly_file - %s' % (str(tmp_panaroma_anomaly_file)))
os.remove(str(tmp_panaroma_anomaly_file))
except OSError:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: error removing tmp_panaroma_anomaly_file - %s' % (str(tmp_panaroma_anomaly_file)))
pass
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager() list
boundary_anomalous_metrics = []
try:
literal_boundary_anomalous_metrics = list(self.redis_conn.smembers('boundary.anomalous_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate list from Redis set boundary.anomalous_metrics')
literal_boundary_anomalous_metrics = []
for metric_list_string in literal_boundary_anomalous_metrics:
metric = literal_eval(metric_list_string)
boundary_anomalous_metrics.append(metric)
# Send alerts
if settings.BOUNDARY_ENABLE_ALERTS:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# for anomalous_metric in self.anomalous_metrics:
for anomalous_metric in boundary_anomalous_metrics:
datapoint = str(anomalous_metric[0])
metric_name = anomalous_metric[1]
base_name = metric_name.replace(FULL_NAMESPACE, '', 1)
expiration_time = str(anomalous_metric[2])
metric_trigger = str(anomalous_metric[5])
alert_threshold = int(anomalous_metric[6])
metric_alerters = anomalous_metric[7]
algorithm = anomalous_metric[8]
if ENABLE_BOUNDARY_DEBUG:
logger.info("debug :: anomalous_metric - " + str(anomalous_metric))
# Determine how many times has the anomaly been seen if the
# ALERT_THRESHOLD is set to > 1 and create a cache key in
# redis to keep count so that alert_threshold can be honored
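# Worked example: with alert_threshold = 3 the key 'anomaly_seen.<algorithm>.<metric>'
# lives for (3 + 1) * 60 = 240 seconds and its msgpack-packed counter is incremented
# on every run that sees the anomaly; the alerters below only fire once the counter
# reaches 3 within that window.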
if alert_threshold == 0:
times_seen = 1
if ENABLE_BOUNDARY_DEBUG:
logger.info("debug :: alert_threshold - " + str(alert_threshold))
if alert_threshold == 1:
times_seen = 1
if ENABLE_BOUNDARY_DEBUG:
logger.info("debug :: alert_threshold - " + str(alert_threshold))
if alert_threshold > 1:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: alert_threshold - ' + str(alert_threshold))
anomaly_cache_key_count_set = False
anomaly_cache_key_expiration_time = (int(alert_threshold) + 1) * 60
anomaly_cache_key = 'anomaly_seen.%s.%s' % (algorithm, base_name)
try:
anomaly_cache_key_count = self.redis_conn.get(anomaly_cache_key)
if not anomaly_cache_key_count:
try:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: redis no anomaly_cache_key - ' + str(anomaly_cache_key))
times_seen = 1
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: redis setex anomaly_cache_key - ' + str(anomaly_cache_key))
self.redis_conn.setex(anomaly_cache_key, anomaly_cache_key_expiration_time, packb(int(times_seen)))
logger.info('set anomaly seen key :: %s seen %s' % (anomaly_cache_key, str(times_seen)))
except Exception as e:
logger.error('error :: redis setex failed :: %s' % str(anomaly_cache_key))
logger.error('error :: could not set key: %s' % e)
else:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: redis anomaly_cache_key retrieved OK - ' + str(anomaly_cache_key))
anomaly_cache_key_count_set = True
except:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: redis failed - anomaly_cache_key retrieval failed - ' + str(anomaly_cache_key))
anomaly_cache_key_count_set = False
if anomaly_cache_key_count_set:
unpacker = Unpacker(use_list=False)
unpacker.feed(anomaly_cache_key_count)
raw_times_seen = list(unpacker)
times_seen = int(raw_times_seen[0]) + 1
try:
self.redis_conn.setex(anomaly_cache_key, anomaly_cache_key_expiration_time, packb(int(times_seen)))
logger.info('set anomaly seen key :: %s seen %s' % (anomaly_cache_key, str(times_seen)))
except:
times_seen = 1
logger.error('error :: set anomaly seen key failed :: %s seen %s' % (anomaly_cache_key, str(times_seen)))
# Alert the alerters if times_seen > alert_threshold
if times_seen >= alert_threshold:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug ::
factor."""
if hasattr(self,'stock'):
oversupply_factor = DfOper.divi([self.stock.values_energy.loc[:,year].to_frame(), self.throughput], expandable=False, collapsible=True).fillna(1)
oversupply_factor.replace(np.inf, 1, inplace=True)
oversupply_factor[oversupply_factor<1] = 1
if (oversupply_factor.values>1.000000001).any():
self.oversupply_factor = oversupply_factor
#TODO fix
return oversupply_factor
else:
return None
else:
return None
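# Illustrative reading of the result: if a node's stock could deliver 120 units of
# energy in a year but its required throughput is only 100, the oversupply factor
# for that year is 1.2; ratios below 1 (and divide-by-zero artifacts) are clamped
# to 1, so adjust_energy below only ever scales stock energy down.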
def adjust_energy(self,oversupply_factor,year):
# self.stock.capacity_factor.loc[:,year] = util.DfOper.mult([self.stock.capacity_factor.loc[:,year].to_frame(),1/oversupply_factor])
self.stock.values_energy.loc[:,year] = util.DfOper.mult([self.stock.values_energy.loc[:,year].to_frame(),1/oversupply_factor])
def set_rollover_groups(self):
"""sets the internal index for use in stock and cost calculations"""
# determines whether stock rollover needs to occur on demand sector or resource bin index
self.stock.rollover_group_levels = []
self.stock.rollover_group_names = []
if self.stock._has_data is True:
for name, level in zip(self.stock.raw_values.index.names, self.stock.raw_values.index.levels):
if (name == 'resource_bin' or name == 'demand_sector') and name not in self.stock.rollover_group_names:
if name == 'demand_sector':
level = self.demand_sectors
self.stock.rollover_group_levels.append(list(level))
self.stock.rollover_group_names.append(name)
elif name == 'resource_bin' or name == 'demand_sector':
original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)]
new_levels = list(set(original_levels+list(level)))
self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)] = new_levels
if self.potential._has_data is True:
for name, level in zip(self.potential.raw_values.index.names, self.potential.raw_values.index.levels):
if (name == 'resource_bin' or name == 'demand_sector') and name not in self.stock.rollover_group_names:
if name == 'demand_sector':
level = self.demand_sectors
self.stock.rollover_group_levels.append(list(level))
self.stock.rollover_group_names.append(name)
elif name == 'resource_bin' or name == 'demand_sector':
original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)]
new_levels = list(set(original_levels+list(level)))
self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)] = new_levels
for technology in self.technologies.values():
attributes = vars (technology)
for att in attributes:
obj = getattr(technology, att)
if inspect.isclass(type(obj)) and hasattr(obj, '__dict__') and hasattr(obj, 'raw_values') and obj.raw_values is not None:
for name, level in zip(obj.raw_values.index.names, obj.raw_values.index.levels):
if (name == 'resource_bin' or name == 'demand_sector') and name not in self.stock.rollover_group_names:
if name == 'demand_sector':
level = self.demand_sectors
self.stock.rollover_group_levels.append(list(level))
self.stock.rollover_group_names.append(name)
elif name == 'resource_bin' or name == 'demand_sector':
original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)]
new_levels = list(set(original_levels+list(level)))
self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)] = new_levels
if self.name == self.distribution_grid_node_name and 'demand_sector' not in self.stock.rollover_group_names:
#requires distribution grid node to maintain demand sector resolution in its stocks
self.stock.rollover_group_levels.append(self.demand_sectors)
self.stock.rollover_group_names.append('demand_sector')
elif self.name == self.distribution_grid_node_name:
original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index('demand_sector')]
new_levels = list(set(original_levels+self.demand_sectors))
self.stock.rollover_group_levels[self.stock.rollover_group_names.index('demand_sector')] = new_levels
self.stock.rollover_group_names = [GeoMapper.supply_primary_geography] + self.stock.rollover_group_names
self.stock.rollover_group_levels = [GeoMapper.geography_to_gau[GeoMapper.supply_primary_geography]] + self.stock.rollover_group_levels
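# Example outcome (illustrative): for a node whose input data carries both a
# 'demand_sector' and a 'resource_bin' level, rollover_group_names ends up as
# [<primary geography>, 'demand_sector', 'resource_bin'] with matching element
# lists in rollover_group_levels, so the stock rollover runs once per
# geography/sector/bin combination.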
def add_stock(self):
"""add stock instance to node"""
self.stock = SupplyStock(supply_node=self.name, scenario=self.scenario)
def calculate(self):
#all nodes can have potential conversions. Set to None if no data.
self.add_nodes()
self.conversion, self.resource_unit = self.add_conversion()
self.set_rollover_groups()
self.calculate_subclasses()
self.calculate_stock_measures()
self.add_case_stock()
self.set_adjustments()
self.set_pass_through_df_dict()
self.setup_stock_rollover(self.years)
def calculate_input_stock(self):
"""calculates the technology stocks in a node based on the combination of measure-stocks and reference stocks"""
levels = self.stock.rollover_group_levels + [self.years] + [self.tech_names]
names = self.stock.rollover_group_names + ['year'] + ['supply_technology']
index = pd.MultiIndex.from_product(levels,names=names)
if self.stock._has_data is True and 'supply_technology' in self.stock.raw_values.index.names:
#remap to technology stocks
self.stock.years = self.years
self.stock.remap(map_from='raw_values', map_to='technology', converted_geography=GeoMapper.supply_primary_geography, fill_timeseries=True, fill_value=np.nan)
#TODO add to clean timeseries. Don't allow filling of timseries before raw values.
self.stock.technology[self.stock.technology.index.get_level_values('year')<min(self.stock.raw_values.index.get_level_values('year'))] = np.nan
self.convert_stock('stock', 'technology')
self.stock.technology = self.stock.technology.reorder_levels(names)
self.stock.technology = self.stock.technology.reindex(index)
#if there's case_specific stock data, we must use that to replace reference technology stocks
if hasattr(self.case_stock,'technology'):
# if there are levels in the case specific stock that are not in the reference stock, we must remove that level from the case stock
mismatched_levels = [x for x in self.case_stock.technology.index.names if x not in self.stock.technology.index.names]
if len(mismatched_levels):
self.case_stock.technology= util.remove_df_levels(self.case_stock.technology,mismatched_levels)
#if there are still level mismatches, it means the reference stock has more levels, which returns an error
if np.any(util.difference_in_df_names(self.case_stock.technology, self.stock.technology,return_bool=True)):
raise ValueError("technology stock indices in node %s do not match input energy system stock data" %self.name)
else:
#if the previous test is passed, we use the reference stock to fill in the Nans of the case stock
self.case_stock.technology = self.case_stock.technology.reorder_levels(names)
self.case_stock.technology = self.case_stock.technology.reindex(index)
self.stock.technology = self.case_stock.technology.fillna(self.stock.technology)
self.stock.technology = self.stock.technology.unstack('year')
self.stock.technology.columns = self.stock.technology.columns.droplevel()
self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'supply_technology',self.tech_names)
elif hasattr(self.case_stock,'technology'):
# if there are levels in the case specific stock that are not in the rollover groups, we must remove that level from the case stock
mismatched_levels = [x for x in self.case_stock.technology.index.names if x not in names]
if len(mismatched_levels):
self.case_stock.technology = util.remove_df_levels(self.case_stock.technology,mismatched_levels)
#if there are still level mismatches, it means the rollover has more levels, which returns an error
if len([x for x in self.stock.rollover_group_names if x not in self.case_stock.technology.index.names]) :
raise ValueError("technology stock levels in node %s do not match other node input data" %self.name)
else:
#if the previous test is passed we reindex the case stock for unspecified technologies
self.case_stock.technology = self.case_stock.technology.reorder_levels(names)
structure_df = pd.DataFrame(1,index=index,columns=['value'])
self.case_stock.technology = self.case_stock.technology.reindex(index)
self.stock.technology = self.case_stock.technology
self.stock.technology = self.stock.technology.unstack('year')
self.stock.technology.columns = self.stock.technology.columns.droplevel()
self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'supply_technology',self.tech_names)
else:
levels = self.stock.rollover_group_levels + [self.tech_names]
names = self.stock.rollover_group_names + ['supply_technology']
index = pd.MultiIndex.from_product(levels,names=names)
self.stock.technology = util.empty_df(index=index,columns=self.years,fill_value=np.NaN)
if self.stock._has_data is True and 'supply_technology' not in self.stock.raw_values.index.names:
levels = self.stock.rollover_group_levels + [self.years]
names = self.stock.rollover_group_names + ['year']
index = pd.MultiIndex.from_product(levels,names=names)
structure_df = pd.DataFrame(1,index=index,columns=['value'])
self.stock.remap(map_from='raw_values', map_to='total', converted_geography=GeoMapper.supply_primary_geography, time_index = self.years,fill_timeseries=True, fill_value=np.nan)
            #TODO add to clean timeseries. Don't allow filling of timeseries before raw values.
self.stock.total[self.stock.total.index.get_level_values('year')<min(self.stock.raw_values.index.get_level_values('year'))] = np.nan
self.stock.total = DfOper.mult([self.stock.total,structure_df],fill_value=np.nan)
self.convert_stock('stock', 'total')
if hasattr(self.case_stock,'total'):
mismatched_levels = [x for x in self.case_stock.total.index.names if x not in names]
if len(mismatched_levels):
self.case_stock.total = util.remove_df_levels(self.case_stock.total,mismatched_levels)
#if there are still level mismatches, it means the reference stock has more levels, which returns an error
if np.any(util.difference_in_df_names(self.case_stock.total, self.stock.total,return_bool=True)):
raise ValueError("total stock indices in node %s do not match input energy system stock data" %self.name)
else:
#if the previous test is passed, we use the reference stock to fill in the Nans of the case stock
self.case_stock.total= self.case_stock.total.reorder_levels(names)
self.stock.total = self.stock.total.reorder_levels(names)
structure_df = pd.DataFrame(1,index=index,columns=['value'])
self.case_stock.total = DfOper.mult([self.case_stock.total,structure_df],fill_value=np.nan)
self.stock.total = DfOper.mult([self.stock.total,structure_df],fill_value=np.nan)
self.stock.total = self.case_stock.total.fillna(self.stock.total)
self.stock.total = self.stock.total.unstack('year')
self.stock.total.columns = self.stock.total.columns.droplevel()
elif hasattr(self.case_stock,'total'):
levels = self.stock.rollover_group_levels + [self.years]
names = self.stock.rollover_group_names + ['year']
index = pd.MultiIndex.from_product(levels,names=names)
# if there are levels in the case specific stock that are not in the rollover groups, we must remove that level from the case stock
mismatched_levels = [x for x in self.case_stock.total.index.names if x not in names]
if len(mismatched_levels):
self.case_stock.total = util.remove_df_levels(self.case_stock.total,mismatched_levels)
#if there are still level mismatches, it means the rollover has more levels, which returns an error
if len([x for x in names if x not in self.case_stock.total.index.names]) :
raise ValueError("total stock levels in node %s do not match other node input data" %self.name)
else:
self.case_stock.total= self.case_stock.total.reorder_levels(names)
self.case_stock.total = self.case_stock.total.reindex(index)
self.stock.total = self.case_stock.total
self.stock.total = self.stock.total.unstack('year')
self.stock.total.columns = self.stock.total.columns.droplevel()
else:
index = pd.MultiIndex.from_product(self.stock.rollover_group_levels,names=self.stock.rollover_group_names)
self.stock.total = util.empty_df(index=index,columns=self.years,fill_value=np.NaN)
        if self.stock._has_data or getattr(self.case_stock, '_has_data', False):
self.stock._has_data = True
self.max_total()
if cfg.rio_supply_run and self.name not in cfg.rio_excluded_nodes:
self.stock.technology.loc[:, cfg.supply_years] = self.stock.technology.loc[:, cfg.supply_years].fillna(0)
self.format_rollover_stocks()
def max_total(self):
tech_sum = util.remove_df_levels(self.stock.technology,'supply_technology')
if hasattr(self.stock,'total'):
if np.all(np.isnan(self.stock.total.values)) and not np.any(np.isnan(self.stock.technology.values)):
self.stock.total = self.stock.total.fillna(tech_sum)
else:
self.stock.total[self.stock.total.values<tech_sum.values] = tech_sum
else:
self.stock.total = pd.DataFrame(np.nan, tech_sum.index,tech_sum.columns)
def format_rollover_stocks(self):
#transposed technology stocks are used for entry in the stock rollover function
self.stock.technology_rollover = self.stock.technology.stack(dropna=False)
util.replace_index_name(self.stock.technology_rollover,'year')
self.stock.total_rollover = util.remove_df_levels(self.stock.technology_rollover,'supply_technology')
self.stock.technology_rollover=self.stock.technology_rollover.unstack('supply_technology')
for tech_name in self.tech_names:
if tech_name not in self.stock.technology_rollover.columns:
self.stock.technology_rollover[tech_name]=np.nan
def add_case_stock(self):
self.case_stock = StockItem()
tech_stocks = []
for technology in self.technologies.values():
for stock in technology.specified_stocks.values():
if stock.values is not None:
stock.values['supply_technology'] = technology.name
stock.values.set_index('supply_technology', append=True, inplace=True)
tech_stocks.append(stock.values)
if len(tech_stocks):
self.case_stock._has_data = True
self.case_stock.technology = util.DfOper.add(tech_stocks, expandable=False)
self.case_stock.technology[self.case_stock.technology.index.get_level_values('year')<cfg.getParamAsInt('current_year')] = np.nan
total_stocks = []
for stock in self.total_stocks.values():
if stock.values is not None:
self.case_stock._has_data = True
total_stocks.append(stock.values)
if len(total_stocks):
self.case_stock.total = DfOper.add(total_stocks, expandable=False)
self.case_stock.total[self.case_stock.total.index.get_level_values('year')<cfg.getParamAsInt('current_year')] = np.nan
# elif len(tech_stocks):
# self.case_stock.total = util.remove_df_levels(self.case_stock.technology,'supply_technology')
def remap_tech_attrs(self, attr_classes, attr='values'):
"""
loops through attr_classes (ex. capital_cost, energy, etc.) in order to map technologies
that reference other technologies in their inputs (i.e. technology A is 150% of the capital cost technology B)
"""
attr_classes = util.ensure_iterable(attr_classes)
for technology in self.technologies.keys():
for attr_class in attr_classes:
self.remap_tech_attr(technology, attr_class, attr)
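    # Illustrative sketch of the recursion (technology names are hypothetical):
    # if technology A defines its capital_cost as a multiple of technology B, and B
    # in turn references technology C, remap_tech_attr('A', 'capital_cost') first
    # recurses into B (and from there into C) until it reaches values that are
    # already absolute, and only then multiplies A's relative inputs by the mapped
    # reference values.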
def remap_tech_attr(self, technology, class_name, attr):
"""
map reference technology values to their associated technology classes
"""
tech_class = getattr(self.technologies[technology], class_name)
if hasattr(tech_class, 'reference_tech'):
if getattr(tech_class, 'reference_tech'):
ref_tech_name = (getattr(tech_class, 'reference_tech'))
                if ref_tech_name not in self.technologies:
raise ValueError("supply node {} has no technology {} to serve as a reference for technology {} in attribute {}".format(self.name, ref_tech_name, technology, class_name))
ref_tech_class = getattr(self.technologies[ref_tech_name], class_name)
# converted is an indicator of whether an input is an absolute
# or has already been converted to an absolute
if not getattr(ref_tech_class, 'absolute'):
                    # If a technology hasn't been mapped, recursion is used
# to map it first (this can go multiple layers)
self.remap_tech_attr(getattr(tech_class, 'reference_tech'), class_name, attr)
if tech_class.raw_values is not None:
tech_data = getattr(tech_class, attr)
                    new_data = DfOper.mult([tech_data, getattr(ref_tech_class, attr)])
# coding: utf-8
""" Some photometry tools for stellar spectroscopists """
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy import interpolate
from astropy.io import ascii
from .robust_polyfit import polyfit
import logging
import os, sys, time
logger = logging.getLogger(__name__)
__all__ = []
from .read_data import datapath
from .read_data import load_parsec_isochrones, load_dartmouth_isochrones
def eval_BC(Teff,logg,FeH,filt="g",allBCs=None):
"""
Default is alpha/Fe = +0.4
"""
if allBCs is None: allBCs = read_bc_table()
BCs = allBCs[filt]
points = np.atleast_2d([np.ravel(Teff),np.ravel(logg),np.ravel(FeH)]).T
points[points[:,2] < -2.5,2] = -2.5
out = interpolate.griddata(BCs[:,0:3], BCs[:,3], points, method='linear')
return out
def read_bc_table(fname=datapath+"/bolometric_corrections/bc_p04_ugriz.data"):
"""
Load a Casagrande+Vandenberg 2014 BC table
"""
with open(fname,'r') as fp:
lines = fp.readlines()
s = lines[1].split()
NTeff, Nlogg, NMH, Nfilt = int(s[0]), int(s[2]), int(s[5]), int(s[7])
allBCs = {}
Teffs = list(map(float, "".join(lines[2:5]).replace("\n"," ").split()))
loggs = list(map(float, lines[5].split()))
Nlist = list(map(int, lines[6].split()))
iline = 7
allBCs = {}
for ifilt in range(Nfilt):
BCtable = np.zeros((np.sum(Nlist)*NMH,4))
itable = 0
for iMH in range(NMH):
s = lines[iline].split()
FeH = float(s[2]); aFe = float(s[5]); filter = s[9]
iline += 1
for ilogg,logg in enumerate(loggs):
BCrow = []
while len(BCrow) < Nlist[ilogg]:
line = lines[iline]
iline += 1
BCrow += list(map(float, line.split()))
for iTeff,Teff in enumerate(Teffs[0:Nlist[ilogg]]):
BCtable[itable,0] = Teff
BCtable[itable,1] = logg
BCtable[itable,2] = FeH
BCtable[itable,3] = BCrow[iTeff]
itable += 1
allBCs[filter] = BCtable
return allBCs
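# Minimal usage sketch (the numbers are illustrative, not from any particular star);
# it assumes the default CV14 ugriz table is present under datapath. The table is
# parsed once and can be reused across eval_BC calls.
def _example_bc_usage():
    allBCs = read_bc_table()
    return eval_BC(Teff=4800.0, logg=1.5, FeH=-2.5, filt="g", allBCs=allBCs)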
##################################################################
# From Drlica-Wagner et al. 2018 (https://arxiv.org/abs/1708.01531)
# g_{des} = g_{sdss} - 0.104 \times (g-r)_{sdss} + 0.01
# r_{des} = r_{sdss} - 0.102 \times (g-r)_{sdss} + 0.02
# i_{des} = i_{sdss} - 0.256 \times (i-z)_{sdss} + 0.02
# z_{des} = z_{sdss} - 0.086 \times (i-z)_{sdss} + 0.01
##################################################################
def gr_sdss2des(gsdss,rsdss):
gmrsdss = gsdss - rsdss
gdes = gsdss - 0.104 * gmrsdss + 0.01
rdes = rsdss - 0.102 * gmrsdss + 0.02
return gdes, rdes
def iz_sdss2des(isdss,zsdss):
imzsdss = isdss - zsdss
ides = isdss - 0.256 * imzsdss + 0.02
zdes = zsdss - 0.086 * imzsdss + 0.01
return ides, zdes
def gr_des2sdss(gdes,rdes):
gmrdes = gdes-rdes
gmrsdss = (gmrdes + 0.01)/0.998
gsdss = gdes + 0.104 * gmrsdss - 0.01
rsdss = rdes + 0.102 * gmrsdss - 0.02
return gsdss, rsdss
def iz_des2sdss(ides,zdes):
imzdes = ides-zdes
imzsdss = (imzdes - 0.01)/0.830
isdss = ides + 0.256 * imzsdss - 0.02
zsdss = zdes + 0.086 * imzsdss - 0.01
return isdss, zsdss
def griz_des2sdss(gdes,rdes,ides,zdes):
gsdss, rsdss = gr_des2sdss(gdes,rdes)
isdss, zsdss = iz_des2sdss(ides,zdes)
return gsdss, rsdss, isdss, zsdss
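# The inverse coefficients above follow directly from the forward relations quoted
# in the comment block: (g-r)_des = (1 - 0.104 + 0.102)*(g-r)_sdss - 0.01
# = 0.998*(g-r)_sdss - 0.01, so (g-r)_sdss = ((g-r)_des + 0.01)/0.998, and likewise
# (i-z)_des = 0.830*(i-z)_sdss + 0.01, so (i-z)_sdss = ((i-z)_des - 0.01)/0.830.
# Quick round-trip sketch (magnitudes are illustrative):
def _check_des_sdss_roundtrip(g=17.0, r=16.5, i=16.3, z=16.2):
    gdes, rdes = gr_sdss2des(g, r)
    ides, zdes = iz_sdss2des(i, z)
    return griz_des2sdss(gdes, rdes, ides, zdes)  # should recover ~(g, r, i, z)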
### Setup Jordi06
# http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
def get_jordi06_coeffs(type):
if type==0: # Combined Pop I/Pop II
a_Bmg = 0.313; e_a_Bmg = 0.003
b_Bmg = 0.219; e_b_Bmg = 0.002
a_Vmg =-0.565; e_a_Vmg = 0.001
b_Vmg =-0.016; e_b_Vmg = 0.001
elif type==1: # Pop I
a_Bmg = 0.312; e_a_Bmg = 0.003
b_Bmg = 0.219; e_b_Bmg = 0.002
a_Vmg =-0.573; e_a_Vmg = 0.002
b_Vmg =-0.016; e_b_Vmg = 0.002
elif type==2: # Pop II
a_Bmg = 0.349; e_a_Bmg = 0.009
b_Bmg = 0.245; e_b_Bmg = 0.006
a_Vmg =-0.569; e_a_Vmg = 0.007
b_Vmg = 0.021; e_b_Vmg = 0.004
else:
raise ValueError("Type must be 0, 1, 2 (got {})".format(type))
return a_Bmg, b_Bmg, a_Vmg, b_Vmg, e_a_Bmg, e_b_Bmg, e_a_Vmg, e_b_Vmg
def jordi06_gmi_to_VmI(gmi,geterr=True):
assert np.all(np.logical_or(np.ravel(gmi) < 2.1, np.isnan(np.ravel(gmi))))
VmI = 0.674 * gmi + 0.406
if geterr:
VmImin = (0.674-0.005)*gmi + (0.406 - 0.004)
VmImax = (0.674+0.005)*gmi + (0.406 + 0.004)
return VmImin, VmI, VmImax
return VmI
def _gmr_to_BmV(gmr,geterr=True,type=0):
a_Bmg, b_Bmg, a_Vmg, b_Vmg, e_a_Bmg, e_b_Bmg, e_a_Vmg, e_b_Vmg = get_jordi06_coeffs(type)
# Calculate middle
Bmg = a_Bmg*gmr + b_Bmg
Vmg = a_Vmg*gmr + b_Vmg
BmV = Bmg - Vmg
if not geterr: return BmV
# Calculate 1 sigma error estimate
if gmr >= 0:
Bmg_max = (a_Bmg+e_a_Bmg)*gmr+(b_Bmg+e_b_Bmg)
Bmg_min = (a_Bmg-e_a_Bmg)*gmr+(b_Bmg-e_b_Bmg)
Vmg_max = (a_Vmg+e_a_Vmg)*gmr+(b_Vmg+e_b_Vmg)
Vmg_min = (a_Vmg-e_a_Vmg)*gmr+(b_Vmg-e_b_Vmg)
else:
Bmg_max = (a_Bmg-e_a_Bmg)*gmr+(b_Bmg+e_b_Bmg)
Bmg_min = (a_Bmg+e_a_Bmg)*gmr+(b_Bmg-e_b_Bmg)
Vmg_max = (a_Vmg-e_a_Vmg)*gmr+(b_Vmg+e_b_Vmg)
Vmg_min = (a_Vmg+e_a_Vmg)*gmr+(b_Vmg-e_b_Vmg)
BmV_max = Bmg_max-Vmg_min
BmV_min = Bmg_min-Vmg_max
return BmV_min,BmV,BmV_max
jordi06_gmr_to_BmV = np.vectorize(_gmr_to_BmV)
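# Usage sketch (colours are illustrative): the vectorized wrapper accepts scalars
# or arrays, and with geterr=True returns the (lower, central, upper) B-V estimates:
#   BmV_lo, BmV, BmV_hi = jordi06_gmr_to_BmV(np.array([0.3, 0.6, 0.9]), geterr=True, type=0)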
###################################################################
# From Casagrande et al. 2010, applicable to dwarfs and subgiants #
###################################################################
def C10_Teff_BmV(BmV, FeH):
""" 73K scatter """
a0, a1, a2, a3, a4, a5 = .5665, .4809, -.0060, -.0613, -.0042, -.0055
theta = a0 + a1*BmV + a2*BmV*BmV + a3*BmV*FeH + a4*FeH + a5*FeH*FeH
Teff = 5040./theta
return Teff
def C10_Teff_VmI(VmI, FeH):
""" 59K scatter """
a0, a1, a2, a3, a4, a5 = .4033, .8171, -.1987, -.0409, .0319, .0012
theta = a0 + a1*VmI + a2*VmI*VmI + a3*VmI*FeH + a4*FeH + a5*FeH*FeH
Teff = 5040./theta
return Teff
##################################
# From Alonso et al. 1999: F0-K5 #
##################################
def A99_BC_V(Teff, FeH):
"""
Typical scatter is 0.025 for cool stars, 0.009 for warm stars (dividing at T=4500K)
Limits of applicability are 3.5 < logT < 3.96, though different for different [Fe/H] ranges
"""
X = np.ravel(np.log10(Teff) - 3.52); FeH = np.ravel(FeH)
# Equations 17 and 18
BC17 = -5.531e-2/X - 0.6177 + 4.420*X - 2.669*X**2. + 0.6943*X*FeH - 0.1071*FeH - 8.612e-3*FeH**2.
BC18 = -9.930e-2/X + 2.887e-2 + 2.275*X - 4.425*X**2. + 0.3505*X*FeH - 5.558e-2*FeH - 5.375e-3*FeH**2
BC = BC17.copy()
ii = np.log10(Teff) >= 3.65
BC[ii] = BC18[ii]
return BC
def B79_VmI_C2J(VmI):
""" Convert V-I in Cousins' mags to V-I in Johnson's mags from Bessell 1979 """
VmI = np.ravel(VmI)
out = VmI.copy()/0.778
out[VmI < 0] = VmI[VmI < 0]/0.713
ii = out > 2.0
out[ii] = (VmI[ii]+0.13)/0.835
return out
def A99_Teff_VmI(VmI):
"""
Johnson's V, Johnson's (NOT Cousins') I
125K scatter, no dependence on Fe/H.
    I have assumed that VmI is given in Johnson-Cousins, and convert it to the
    Johnson system with B79_VmI_C2J before applying the calibration.
"""
VmI = B79_VmI_C2J(VmI)
theta = 0.5379 + 0.3981 * VmI + 4.432e-2 * VmI**2 - 2.693e-2 * VmI**3
Teff = 5040./theta
return Teff
def _A99_function(X, FeH, a0, a1, a2, a3, a4, a5):
return a0 + a1*X + a2*X**2. + a3*X*FeH + a4*FeH + a5*FeH**2.
def _A99_Teff_BmV_3(BmV, FeH):
""" 167K scatter, B-V < 0.7 """
a0, a1, a2, a3, a4, a5 = 0.5716, 0.5404, -6.126e-2, -4.862e-2, -1.777e-2, -7.969e-3
return _A99_function(BmV, FeH, a0, a1, a2, a3, a4, a5)
def _A99_Teff_BmV_4(BmV, FeH):
""" 96K scatter, B-V > 0.8 """
a0, a1, a2, a3, a4, a5 = 0.6177, 0.4354, -4.025e-3, 5.204e-2, -0.1127, -1.385e-2
return _A99_function(BmV, FeH, a0, a1, a2, a3, a4, a5)
def A99_Teff_BmV(BmV, FeH):
"""
Johnson's B and V
Using equations 3 and 4 of A99, scatter is 167K
Linearly interpolating in theta = 5040/Teff for 0.7 < B-V < 0.8
"""
BmV = np.ravel(BmV); FeH = np.ravel(FeH)
t3 = _A99_Teff_BmV_3(BmV, FeH)
t4 = _A99_Teff_BmV_4(BmV, FeH)
# Bluest stars, Eq 3
t = t3.copy()
# Reddest stars, Eq 4
t[BmV > 0.8] = t4[BmV > 0.8]
# In between: 0.7 < B-V < 0.8, linear interpolate
ii = np.logical_and(BmV > 0.7, BmV <= 0.8)
x1, x2 = 0.7, 0.8
y1 = _A99_Teff_BmV_3(x1, FeH)
y2 = _A99_Teff_BmV_4(x2, FeH)
m = (y2 - y1)/(x2 - x1)
y = m * (BmV - x1) + y1
t[ii] = y[ii]
return 5040./t
def phot_logg(Teff,mag0,BCmag,distmod,Mstar=0.75):
"""
Using solar values from Venn et al. 2017
"""
return 4.44 + np.log10(Mstar) + 4*np.log10(Teff/5780) + 0.4 * (mag0 - distmod + BCmag - 4.75)
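# Worked example (hypothetical inputs, not from any catalogue): for Teff = 4800 K,
# mag0 = 17.5, BC = -0.3, a distance modulus of 16.5 and the default 0.75 Msun,
#   logg = 4.44 + log10(0.75) + 4*log10(4800/5780) + 0.4*(17.5 - 16.5 - 0.3 - 4.75)
#        ~ 4.44 - 0.12 - 0.32 - 1.62 ~ 2.37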
def iterate_find_logg(Teff,mag0,FeH,dmod,filt,maxiter=10,tol=.005):
""" Assumes [alpha/Fe] = +0.4, sdss mags for filt """
# Initialize BC and logg
BC = 0.0
logg = phot_logg(Teff,mag0,BC,dmod)
for iter in range(maxiter):
BC = eval_BC(Teff, logg, FeH, filt=filt)
new_logg = phot_logg(Teff,mag0,BC,dmod)
if np.all(np.abs(new_logg - logg) < tol):
break
logg = new_logg
else:
print("WARNING: Reached max iters")
return logg
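# Usage sketch (inputs are illustrative): iterate the bolometric correction and
# surface gravity to convergence for a single star; filt must be one of the sdss
# bands available in the BC table.
def _example_logg_iteration():
    return iterate_find_logg(Teff=4800.0, mag0=17.5, FeH=-2.5, dmod=16.5, filt="g")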
def phot_logg_error(Tfracerr, dmoderr, masserr=0.05, magerr=0.0, BCerr=0.03):
"""
Estimate 1 sigma error in logg
Tfracerr: temperature error divided by temperature
dmoderr: distance modulus error in mag
masserr (0.05 mag): from assuming a mass, 0.05 is 0.7-0.8 Msun
magerr: assume this is negligible by default
BCerr: estimated about 0.03 mag from running CV14 several times
"""
Terr_mag = 4*Tfracerr # from a taylor expansion
magerr = 0.4*magerr
BCerr = 0.4*BCerr
dmoderr = 0.4*dmoderr
return np.sqrt(masserr**2 + Terr_mag**2 + magerr**2 + dmoderr**2 + BCerr**2)
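# Worked example (hypothetical uncertainties): a 2% temperature error and a 0.1 mag
# distance-modulus error give
#   sqrt(0.05**2 + (4*0.02)**2 + (0.4*0.1)**2 + (0.4*0.03)**2) ~ 0.10 dex in logg.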
###################
## Y2 isochrones ##
###################
def get_logT_to_logg(FeH=-3.0):
assert FeH in [-2.0, -2.5, -3.0]
if FeH == -2.0:
iso = ascii.read(datapath+'/stellar_param_data/afe040feh200set1_12gyr.txt')
elif FeH == -2.5:
iso = ascii.read(datapath+'/stellar_param_data/afe040feh250set1_12gyr.txt')
elif FeH == -3.0:
iso = ascii.read(datapath+'/stellar_param_data/afe040feh300set1_12gyr.txt')
ii_max_logT = np.argmax(iso['logT'])
max_logT = iso[ii_max_logT]['logT']
max_logg = iso[ii_max_logT]['logg']
#print max_logT, max_logg
ii = iso['logg'] < max_logg
logT = iso[ii]['logT']
logg = iso[ii]['logg']
logT_to_logg = interpolate.interp1d(logT,logg)
    return logT_to_logg
# Natural Language Toolkit: Recursive Descent Parser
#
# Copyright (C) 2001-2014 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
from nltk.grammar import Nonterminal
from nltk.tree import Tree, ImmutableTree
from nltk.compat import unicode_repr
from nltk.parse.api import ParserI
##//////////////////////////////////////////////////////
## Recursive Descent Parser
##//////////////////////////////////////////////////////
class RecursiveDescentParser(ParserI):
"""
A simple top-down CFG parser that parses texts by recursively
expanding the fringe of a Tree, and matching it against a
text.
``RecursiveDescentParser`` uses a list of tree locations called a
"frontier" to remember which subtrees have not yet been expanded
and which leaves have not yet been matched against the text. Each
tree location consists of a list of child indices specifying the
path from the root of the tree to a subtree or a leaf; see the
reference documentation for Tree for more information
about tree locations.
When the parser begins parsing a text, it constructs a tree
containing only the start symbol, and a frontier containing the
location of the tree's root node. It then extends the tree to
cover the text, using the following recursive procedure:
- If the frontier is empty, and the text is covered by the tree,
then return the tree as a possible parse.
- If the frontier is empty, and the text is not covered by the
tree, then return no parses.
- If the first element of the frontier is a subtree, then
use CFG productions to "expand" it. For each applicable
production, add the expanded subtree's children to the
frontier, and recursively find all parses that can be
generated by the new tree and frontier.
- If the first element of the frontier is a token, then "match"
it against the next token from the text. Remove the token
from the frontier, and recursively find all parses that can be
generated by the new tree and frontier.
:see: ``nltk.grammar``
"""
def __init__(self, grammar, trace=0):
"""
Create a new ``RecursiveDescentParser``, that uses ``grammar``
to parse texts.
:type grammar: ContextFreeGrammar
:param grammar: The grammar used to parse texts.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
def grammar(self):
return self._grammar
def parse(self, tokens):
# Inherit docs from ParserI
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# Start a recursive descent parse, with an initial tree
# containing just the start symbol.
start = self._grammar.start().symbol()
initial_tree = Tree(start, [])
frontier = [()]
if self._trace:
self._trace_start(initial_tree, frontier, tokens)
return self._parse(tokens, initial_tree, frontier)
def _parse(self, remaining_text, tree, frontier):
"""
        Recursively expand and match each element of ``tree``
specified by ``frontier``, to cover ``remaining_text``. Return
a list of all parses found.
:return: An iterator of all parses that can be generated by
matching and expanding the elements of ``tree``
specified by ``frontier``.
:rtype: iter(Tree)
:type tree: Tree
:param tree: A partial structure for the text that is
currently being parsed. The elements of ``tree``
that are specified by ``frontier`` have not yet been
expanded or matched.
:type remaining_text: list(str)
:param remaining_text: The portion of the text that is not yet
covered by ``tree``.
:type frontier: list(tuple(int))
:param frontier: A list of the locations within ``tree`` of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched. This list sorted
in left-to-right order of location within the tree.
"""
# If the tree covers the text, and there's nothing left to
# expand, then we've found a complete parse; return it.
if len(remaining_text) == 0 and len(frontier) == 0:
if self._trace:
self._trace_succeed(tree, frontier)
yield tree
# If there's still text, but nothing left to expand, we failed.
elif len(frontier) == 0:
if self._trace:
self._trace_backtrack(tree, frontier)
# If the next element on the frontier is a tree, expand it.
elif isinstance(tree[frontier[0]], Tree):
for result in self._expand(remaining_text, tree, frontier):
yield result
# If the next element on the frontier is a token, match it.
else:
for result in self._match(remaining_text, tree, frontier):
yield result
def _match(self, rtext, tree, frontier):
"""
:rtype: iter(Tree)
:return: an iterator of all parses that can be generated by
matching the first element of ``frontier`` against the
first token in ``rtext``. In particular, if the first
element of ``frontier`` has the same type as the first
token in ``rtext``, then substitute the token into
``tree``; and return all parses that can be generated by
matching and expanding the remaining elements of
``frontier``. If the first element of ``frontier`` does not
have the same type as the first token in ``rtext``, then
            return an empty list.
:type tree: Tree
:param tree: A partial structure for the text that is
currently being parsed. The elements of ``tree``
that are specified by ``frontier`` have not yet been
expanded or matched.
:type rtext: list(str)
:param rtext: The portion of the text that is not yet
covered by ``tree``.
:type frontier: list of tuple of int
:param frontier: A list of the locations within ``tree`` of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
tree_leaf = tree[frontier[0]]
if (len(rtext) > 0 and tree_leaf == rtext[0]):
# If it's a terminal that matches rtext[0], then substitute
# in the token, and continue parsing.
newtree = tree.copy(deep=True)
newtree[frontier[0]] = rtext[0]
if self._trace:
self._trace_match(newtree, frontier[1:], rtext[0])
for result in self._parse(rtext[1:], newtree, frontier[1:]):
yield result
else:
# If it's a non-matching terminal, fail.
if self._trace:
self._trace_backtrack(tree, frontier, rtext[:1])
def _expand(self, remaining_text, tree, frontier, production=None):
"""
:rtype: iter(Tree)
:return: An iterator of all parses that can be generated by
expanding the first element of ``frontier`` with
``production``. In particular, if the first element of
``frontier`` is a subtree whose node type is equal to
``production``'s left hand side, then add a child to that
subtree for each element of ``production``'s right hand
side; and return all parses that can be generated by
matching and expanding the remaining elements of
``frontier``. If the first element of ``frontier`` is not a
subtree whose node type is equal to ``production``'s left
hand side, then return an empty list. If ``production`` is
not specified, then return a list of all parses that can
be generated by expanding the first element of ``frontier``
with *any* CFG production.
:type tree: Tree
:param tree: A partial structure for the text that is
currently being parsed. The elements of ``tree``
that are specified by ``frontier`` have not yet been
expanded or matched.
:type remaining_text: list(str)
:param remaining_text: The portion of the text that is not yet
covered by ``tree``.
:type frontier: list(tuple(int))
:param frontier: A list of the locations within ``tree`` of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
if production is None: productions = self._grammar.productions()
else: productions = [production]
for production in productions:
lhs = production.lhs().symbol()
if lhs == tree[frontier[0]].label():
subtree = self._production_to_tree(production)
if frontier[0] == ():
newtree = subtree
else:
newtree = tree.copy(deep=True)
newtree[frontier[0]] = subtree
new_frontier = [frontier[0]+(i,) for i in
range(len(production.rhs()))]
if self._trace:
self._trace_expand(newtree, new_frontier, production)
for result in self._parse(remaining_text, newtree,
new_frontier + frontier[1:]):
yield result
def _production_to_tree(self, production):
"""
:rtype: Tree
:return: The Tree that is licensed by ``production``.
In particular, given the production ``[lhs -> elt[1] ... elt[n]]``
return a tree that has a node ``lhs.symbol``, and
``n`` children. For each nonterminal element
``elt[i]`` in the production, the tree token has a
childless subtree with node value ``elt[i].symbol``; and
for each terminal element ``elt[j]``, the tree token has
a leaf token with type ``elt[j]``.
:param production: The CFG production that licenses the tree
token that should be returned.
:type production: Production
"""
children = []
for elt in production.rhs():
if isinstance(elt, Nonterminal):
children.append(Tree(elt.symbol(), []))
else:
                # terminal: the token itself becomes a leaf
                children.append(elt)
        return Tree(production.lhs().symbol(), children)
"""Support for Rflink devices."""
import asyncio
from collections import defaultdict
import logging
import async_timeout
from rflink.protocol import create_rflink_connection
from serial import SerialException
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_COMMAND,
CONF_DEVICE_ID,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import RestoreEntity
from .utils import brightness_to_rflink
_LOGGER = logging.getLogger(__name__)
ATTR_EVENT = "event"
CONF_ALIASES = "aliases"
CONF_GROUP_ALIASES = "group_aliases"
CONF_GROUP = "group"
CONF_NOGROUP_ALIASES = "nogroup_aliases"
CONF_DEVICE_DEFAULTS = "device_defaults"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_FIRE_EVENT = "fire_event"
CONF_IGNORE_DEVICES = "ignore_devices"
CONF_RECONNECT_INTERVAL = "reconnect_interval"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_WAIT_FOR_ACK = "wait_for_ack"
CONF_KEEPALIVE_IDLE = "tcp_keepalive_idle_timer"
DATA_DEVICE_REGISTER = "rflink_device_register"
DATA_ENTITY_LOOKUP = "rflink_entity_lookup"
DATA_ENTITY_GROUP_LOOKUP = "rflink_entity_group_only_lookup"
DEFAULT_RECONNECT_INTERVAL = 10
DEFAULT_SIGNAL_REPETITIONS = 1
DEFAULT_TCP_KEEPALIVE_IDLE_TIMER = 3600
CONNECTION_TIMEOUT = 10
EVENT_BUTTON_PRESSED = "button_pressed"
EVENT_KEY_COMMAND = "command"
EVENT_KEY_ID = "id"
EVENT_KEY_SENSOR = "sensor"
EVENT_KEY_UNIT = "unit"
RFLINK_GROUP_COMMANDS = ["allon", "alloff"]
DOMAIN = "rflink"
SERVICE_SEND_COMMAND = "send_command"
SIGNAL_AVAILABILITY = "rflink_device_available"
SIGNAL_HANDLE_EVENT = "rflink_handle_event_{}"
SIGNAL_EVENT = "rflink_event"
TMP_ENTITY = "tmp.{}"
DEVICE_DEFAULTS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(
CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
): vol.Coerce(int),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
vol.Optional(
CONF_KEEPALIVE_IDLE, default=DEFAULT_TCP_KEEPALIVE_IDLE_TIMER
): int,
vol.Optional(
CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL
): int,
vol.Optional(CONF_IGNORE_DEVICES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
SEND_COMMAND_SCHEMA = vol.Schema(
{vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_COMMAND): cv.string}
)
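# Example service payload (the device id is illustrative): the schema validates the
# data passed to the rflink.send_command service, e.g.
#   SEND_COMMAND_SCHEMA({CONF_DEVICE_ID: "newkaku_0000c6c2_1", CONF_COMMAND: "on"})
# and raises vol.Invalid if either key is missing or not a string.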
def identify_event_type(event):
"""Look at event to determine type of device.
Async friendly.
"""
if EVENT_KEY_COMMAND in event:
return EVENT_KEY_COMMAND
if EVENT_KEY_SENSOR in event:
return EVENT_KEY_SENSOR
return "unknown"
async def async_setup(hass, config):
"""Set up the Rflink component."""
# Allow entities to register themselves by device_id to be looked up when
# new rflink events arrive to be handled
hass.data[DATA_ENTITY_LOOKUP] = {
EVENT_KEY_COMMAND: defaultdict(list),
EVENT_KEY_SENSOR: defaultdict(list),
}
hass.data[DATA_ENTITY_GROUP_LOOKUP] = {EVENT_KEY_COMMAND: defaultdict(list)}
# Allow platform to specify function to register new unknown devices
hass.data[DATA_DEVICE_REGISTER] = {}
async def async_send_command(call):
"""Send Rflink command."""
_LOGGER.debug("Rflink command for %s", str(call.data))
if not (
await RflinkCommand.send_command(
call.data.get(CONF_DEVICE_ID), call.data.get(CONF_COMMAND)
)
):
_LOGGER.error("Failed Rflink command for %s", str(call.data))
else:
async_dispatcher_send(
hass,
SIGNAL_EVENT,
{
EVENT_KEY_ID: call.data.get(CONF_DEVICE_ID),
EVENT_KEY_COMMAND: call.data.get(CONF_COMMAND),
},
)
hass.services.async_register(
DOMAIN, SERVICE_SEND_COMMAND, async_send_command, schema=SEND_COMMAND_SCHEMA
)
@callback
def event_callback(event):
"""Handle incoming Rflink events.
Rflink events arrive as dictionaries of varying content
depending on their type. Identify the events and distribute
accordingly.
"""
event_type = identify_event_type(event)
_LOGGER.debug("event of type %s: %s", event_type, event)
# Don't propagate non entity events (eg: version string, ack response)
if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
_LOGGER.debug("unhandled event of type: %s", event_type)
return
# Lookup entities who registered this device id as device id or alias
event_id = event.get(EVENT_KEY_ID)
is_group_event = (
event_type == EVENT_KEY_COMMAND
and event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS
)
if is_group_event:
entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
event_id, []
)
else:
entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]
_LOGGER.debug("entity_ids: %s", entity_ids)
if entity_ids:
# Propagate event to every entity matching the device id
for entity in entity_ids:
_LOGGER.debug("passing event to %s", entity)
async_dispatcher_send(hass, SIGNAL_HANDLE_EVENT.format(entity), event)
elif not is_group_event:
# If device is not yet known, register with platform (if loaded)
if event_type in hass.data[DATA_DEVICE_REGISTER]:
_LOGGER.debug("device_id not known, adding new device")
# Add bogus event_id first to avoid race if we get another
# event before the device is created
# Any additional events received before the device has been
# created will thus be ignored.
hass.data[DATA_ENTITY_LOOKUP][event_type][event_id].append(
TMP_ENTITY.format(event_id)
)
hass.async_create_task(
hass.data[DATA_DEVICE_REGISTER][event_type](event)
)
else:
_LOGGER.debug("device_id not known and automatic add disabled")
# When connecting to tcp host instead of serial port (optional)
host = config[DOMAIN].get(CONF_HOST)
# TCP port when host configured, otherwise serial port
port = config[DOMAIN][CONF_PORT]
keepalive_idle_timer = None
# TCP KeepAlive only if this is TCP based connection (not serial)
if host is not None:
# TCP KEEPALIVE will be enabled if value > 0
keepalive_idle_timer = config[DOMAIN][CONF_KEEPALIVE_IDLE]
if keepalive_idle_timer < 0:
_LOGGER.error(
"A bogus TCP Keepalive IDLE timer was provided (%d secs), "
"it will be disabled. "
"Recommended values: 60-3600 (seconds)",
keepalive_idle_timer,
)
keepalive_idle_timer = None
elif keepalive_idle_timer == 0:
keepalive_idle_timer = None
elif keepalive_idle_timer <= 30:
_LOGGER.warning(
"A very short TCP Keepalive IDLE timer was provided (%d secs) "
"and may produce unexpected disconnections from RFlink device."
" Recommended values: 60-3600 (seconds)",
keepalive_idle_timer,
)
@callback
def reconnect(exc=None):
"""Schedule reconnect after connection has been unexpectedly lost."""
# Reset protocol binding before starting reconnect
RflinkCommand.set_rflink_protocol(None)
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
# If HA is not stopping, initiate new connection
if hass.state != CoreState.stopping:
_LOGGER.warning("Disconnected from Rflink, reconnecting")
hass.async_create_task(connect())
async def connect():
"""Set up connection and hook it into HA for reconnect/shutdown."""
_LOGGER.info("Initiating Rflink connection")
# Rflink create_rflink_connection decides based on the value of host
# (string or None) if serial or tcp mode should be used
# Initiate serial/tcp connection to Rflink gateway
connection = create_rflink_connection(
port=port,
host=host,
keepalive=keepalive_idle_timer,
event_callback=event_callback,
disconnect_callback=reconnect,
loop=hass.loop,
ignore=config[DOMAIN][CONF_IGNORE_DEVICES],
)
try:
async with async_timeout.timeout(CONNECTION_TIMEOUT):
transport, protocol = await connection
except (
SerialException,
OSError,
asyncio.TimeoutError,
) as exc:
reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
_LOGGER.exception(
"Error connecting to Rflink, reconnecting in %s", reconnect_interval
)
# Connection to Rflink device is lost, make entities unavailable
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
hass.loop.call_later(reconnect_interval, reconnect, exc)
return
# There is a valid connection to a Rflink device now so
# mark entities as available
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)
# Bind protocol to command class to allow entities to send commands
RflinkCommand.set_rflink_protocol(protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])
# handle shutdown of Rflink asyncio transport
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, lambda x: transport.close()
)
_LOGGER.info("Connected to Rflink")
hass.async_create_task(connect())
async_dispatcher_connect(hass, SIGNAL_EVENT, event_callback)
return True
class RflinkDevice(Entity):
"""Representation of a Rflink device.
Contains the common logic for Rflink entities.
"""
platform = None
_state = None
_available = True
def __init__(
self,
device_id,
initial_event=None,
name=None,
aliases=None,
group=True,
group_aliases=None,
nogroup_aliases=None,
fire_event=False,
signal_repetitions=DEFAULT_SIGNAL_REPETITIONS,
):
"""Initialize the device."""
# Rflink specific attributes for every component type
self._initial_event = initial_event
self._device_id = device_id
if name:
self._name = name
else:
self._name = device_id
self._aliases = aliases
self._group = group
self._group_aliases = group_aliases
self._nogroup_aliases = nogroup_aliases
self._should_fire_event = fire_event
self._signal_repetitions = signal_repetitions
@callback
def handle_event_callback(self, event):
"""Handle incoming event for device type."""
# Call platform specific event handler
self._handle_event(event)
# Propagate changes through ha
self.async_write_ha_state()
# Put command onto bus for user to subscribe to
if self._should_fire_event and identify_event_type(event) == EVENT_KEY_COMMAND:
self.hass.bus.async_fire(
EVENT_BUTTON_PRESSED,
{ATTR_ENTITY_ID: self.entity_id, ATTR_STATE: event[EVENT_KEY_COMMAND]},
)
_LOGGER.debug(
"Fired bus event for %s: %s", self.entity_id, event[EVENT_KEY_COMMAND]
)
def _handle_event(self, event):
"""Platform specific event handler."""
raise NotImplementedError()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for the device."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self.assumed_state:
return False
return self._state
@property
def assumed_state(self):
"""Assume device state until first device event sets state."""
return self._state is None
@property
def available(self):
"""Return True if entity is available."""
return self._available
@callback
def _availability_callback(self, availability):
"""Update availability state."""
self._available = availability
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register update callback."""
await super().async_added_to_hass()
# Remove temporary bogus entity_id if added
tmp_entity = TMP_ENTITY.format(self._device_id)
if (
tmp_entity
in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id]
):
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].remove(tmp_entity)
# Register id and aliases
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id].append(
self.entity_id
)
if self._group:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].append(self.entity_id)
# aliases respond to both normal and group commands (allon/alloff)
if self._aliases:
for _id in self._aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# group_aliases only respond to group commands (allon/alloff)
if self._group_aliases:
for _id in self._group_aliases:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# nogroup_aliases only respond to normal commands
if self._nogroup_aliases:
for _id in self._nogroup_aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_AVAILABILITY, self._availability_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_HANDLE_EVENT.format(self.entity_id),
self.handle_event_callback,
)
)
# Process the initial event now that the entity is created
if self._initial_event:
self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
"""Singleton class to make Rflink command interface available to entities.
This class is to be inherited by every Entity class that is actionable
(switches/lights). It exposes the Rflink command interface for these
entities.
The Rflink interface is managed as a class level and set during setup (and
reset on reconnect).
"""
# Keep repetition tasks to cancel if state is changed before repetitions
# are sent
_repetition_task = None
_protocol = None
@classmethod
def set_rflink_protocol(cls, protocol, wait_ack=None):
"""Set the Rflink asyncio protocol as a class variable."""
cls._protocol = protocol
if wait_ack is not None:
cls._wait_ack = wait_ack
@classmethod
def is_connected(cls):
"""Return connection status."""
return bool(cls._protocol)
@classmethod
async def send_command(cls, device_id, action):
"""Send device command to Rflink | |
import bpy
import bgl
import gpu
import blf
import bmesh
from bpy_extras import view3d_utils
from math import floor, ceil, copysign
from bgl import *
from bpy.props import *
from mathutils import Vector, Matrix
from . import sprytile_utils, sprytile_modal
from gpu_extras.batch import batch_for_shader
from sprytile_tools.tool_build import ToolBuild
from sprytile_tools.tool_paint import ToolPaint
import sprytile_preview
# Shaders
flat_vertex_shader = '''
uniform mat4 u_modelViewProjectionMatrix;
in vec2 i_position;
in vec4 i_color;
out vec4 o_color;
void main()
{
o_color = i_color;
gl_Position = u_modelViewProjectionMatrix * vec4(i_position, 0.0, 1.0);
}
'''
flat_fragment_shader = '''
in vec4 o_color;
out vec4 frag_color;
void main()
{
frag_color = o_color;
}
'''
image_vertex_shader = '''
uniform mat4 u_modelViewProjectionMatrix;
in vec2 i_position;
in vec4 i_color;
in vec2 i_uv;
out vec2 o_uv;
out vec4 o_color;
void main()
{
o_uv = i_uv;
o_color = i_color;
gl_Position = u_modelViewProjectionMatrix * vec4(i_position, 0.0, 1.0);
}
'''
image_fragment_shader = '''
uniform sampler2D u_image;
uniform float u_correct;
in vec2 o_uv;
in vec4 o_color;
out vec4 frag_color;
void main()
{
vec4 col = texture(u_image, o_uv) * o_color;
frag_color = pow(col, vec4(u_correct));
}
'''
flat_shader = gpu.types.GPUShader(flat_vertex_shader, flat_fragment_shader)
image_shader = gpu.types.GPUShader(image_vertex_shader, image_fragment_shader)
class SprytileGuiData(bpy.types.PropertyGroup):
zoom : FloatProperty(
name="Sprytile UI zoom",
default=1.0
)
use_mouse : BoolProperty(name="GUI use mouse")
middle_btn : BoolProperty(name="GUI middle mouse")
    is_dirty : BoolProperty(name="Sprytile GUI redraw flag")
class VIEW3D_OP_SprytileGui(bpy.types.Operator):
bl_idname = "sprytile.gui_win"
bl_label = "Sprytile GUI"
mouse_pt = None
label_frames = 30
is_selecting = False
is_moving = False
sel_start = None
sel_origin = None
is_running = False
tile_ui_active = False
out_of_region = False
build_previews = {
'MAKE_FACE' : ToolBuild,
'PAINT' : ToolPaint,
'SET_NORMAL' : None,
'FILL' : None
}
# ================
# Modal functions
# ================
@classmethod
def poll(cls, context):
return context.area.type == 'VIEW_3D'
def invoke(self, context, event):
if context.space_data.type != 'VIEW_3D':
return {'CANCELLED'}
if len(context.scene.sprytile_mats) < 1:
return {'CANCELLED'}
# Try to setup offscreen
setup_off_return = VIEW3D_OP_SprytileGui.setup_offscreen(self, context)
if setup_off_return is not None:
return setup_off_return
self.label_counter = 0
self.get_zoom_level(context)
self.prev_in_region = False
self.handle_ui(context, event)
# Add the draw handler call back, for drawing into viewport
VIEW3D_OP_SprytileGui.handler_add(self, context, context.region)
if context.area:
context.area.tag_redraw()
context.scene.sprytile_ui.is_dirty = True
VIEW3D_OP_SprytileGui.is_running = True
# Add actual modal handler
context.window_manager.modal_handler_add(self)
# Add timer event
win_mgr = context.window_manager
self.win_timer = win_mgr.event_timer_add(0.1, window=context.window)
# Update view axis
self.update_view_axis(context)
return {'RUNNING_MODAL'}
def update_view_axis(self, context):
sprytile_data = context.scene.sprytile_data
view_axis = sprytile_modal.VIEW3D_OP_SprytileModalTool.find_view_axis(context)
if view_axis is not None:
if view_axis != sprytile_data.normal_mode:
sprytile_data.normal_mode = view_axis
sprytile_data.lock_normal = False
def modal(self, context, event):
if context.area is None:
self.exit(context)
return {'CANCELLED'}
if not sprytile_utils.get_current_tool(context).startswith("sprytile"):
self.exit(context)
return {'CANCELLED'}
if context.mode != 'EDIT_MESH':
self.exit(context)
return {'CANCELLED'}
elif not VIEW3D_OP_SprytileGui.is_running:
VIEW3D_OP_SprytileGui.is_running = True
# Check that the mouse is inside the region
region = context.region
coord = Vector((event.mouse_region_x, event.mouse_region_y))
VIEW3D_OP_SprytileGui.out_of_region = coord.x < 0 or coord.y < 0 or coord.x > region.width or coord.y > region.height
if event.type == 'TIMER':
self.update_view_axis(context)
if self.label_counter > 0:
self.label_counter -= 1
# Check if current_grid is different from current sprytile grid
if context.object.sprytile_gridid != VIEW3D_OP_SprytileGui.current_grid:
# Setup the offscreen texture for the new grid
setup_off_return = VIEW3D_OP_SprytileGui.setup_offscreen(self, context)
if setup_off_return is not None:
return setup_off_return
# Skip redrawing on this frame
return {'PASS_THROUGH'}
ret_val = self.handle_ui(context, event)
VIEW3D_OP_SprytileGui.tile_ui_active = ret_val == 'RUNNING_MODAL'
# Build the data that will be used by tool observers
rv3d = context.region_data
coord = event.mouse_region_x, event.mouse_region_y
no_data = rv3d is None
if no_data is False:
# get the ray from the viewport and mouse
ray_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
mode = bpy.context.scene.sprytile_data.paint_mode
if VIEW3D_OP_SprytileGui.build_previews[mode]:
sprytile_modal.VIEW3D_OP_SprytileModalTool.verify_bmesh_layers(bmesh.from_edit_mesh(context.object.data))
VIEW3D_OP_SprytileGui.build_previews[mode].build_preview(context, context.scene, ray_origin, ray_vector)
else:
sprytile_preview.set_preview_data(None, None)
context.scene.sprytile_ui.is_dirty = False
context.area.tag_redraw()
return {ret_val}
def exit(self, context):
VIEW3D_OP_SprytileGui.handler_remove(self, context)
VIEW3D_OP_SprytileGui.is_running = False
VIEW3D_OP_SprytileGui.tile_ui_active = False
if hasattr(self, "win_timer"):
context.window_manager.event_timer_remove(self.win_timer)
if context.area is not None:
context.area.tag_redraw()
def set_zoom_level(self, context, zoom_shift):
region = context.region
zoom_level = context.scene.sprytile_ui.zoom
zoom_level = self.calc_zoom(region, zoom_level, zoom_shift)
display_size = VIEW3D_OP_SprytileGui.display_size
calc_size = round(display_size[0] * zoom_level), round(display_size[1] * zoom_level)
height_min = min(128, display_size[1])
while calc_size[1] < height_min:
zoom_level = self.calc_zoom(region, zoom_level, 1)
calc_size = round(display_size[0] * zoom_level), round(display_size[1] * zoom_level)
while calc_size[0] > region.width or calc_size[1] > region.height:
zoom_level = self.calc_zoom(region, zoom_level, -1)
calc_size = round(display_size[0] * zoom_level), round(display_size[1] * zoom_level)
context.scene.sprytile_ui.zoom = zoom_level
def calc_zoom(self, region, zoom, steps):
if steps == 0:
return zoom
step = copysign(1, steps)
count = 0
while count != steps:
# Zooming in
if steps > 0:
if zoom >= 2.0:
zoom += 0.5
elif zoom >= 0.25:
zoom += 0.25
else:
zoom *= 2
# Zooming out
else:
if zoom <= 0.25:
zoom *= 0.5
elif zoom <= 2.0:
zoom -= 0.25
else:
zoom -= 0.5
count += step
if VIEW3D_OP_SprytileGui.display_size[1] > region.height:
zoom = min(region.height / VIEW3D_OP_SprytileGui.display_size[1], zoom)
return zoom
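    # Example of the stepping behaviour (values are illustrative): starting from 1.0,
    # repeated zoom-in steps give 1.0 -> 1.25 -> 1.5 -> 1.75 -> 2.0 -> 2.5 -> 3.0,
    # i.e. quarter steps between 0.25 and 2.0, half steps above 2.0, and doubling
    # below 0.25; zooming out walks the same ladder in reverse (before the final
    # clamp against the region height).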
def get_zoom_level(self, context):
region = context.region
display_size = VIEW3D_OP_SprytileGui.display_size
target_height = region.height * 0.35
zoom_level = round(region.height / display_size[1])
if zoom_level <= 0:
zoom_level = self.calc_zoom(region, 1, -1)
calc_height = round(display_size[1] * zoom_level)
while calc_height > target_height:
zoom_level = self.calc_zoom(region, zoom_level, -1)
calc_height = round(display_size[1] * zoom_level)
context.scene.sprytile_ui.zoom = zoom_level
def handle_ui(self, context, event):
if event.type in {'LEFTMOUSE', 'MOUSEMOVE'}:
self.mouse_pt = Vector((event.mouse_region_x, event.mouse_region_y))
mouse_pt = self.mouse_pt
region = context.region
obj = context.object
ret_val = 'RUNNING_MODAL'
tilegrid = sprytile_utils.get_grid(context, obj.sprytile_gridid)
tex_size = VIEW3D_OP_SprytileGui.tex_size
display_scale = context.scene.sprytile_ui.zoom
display_size = VIEW3D_OP_SprytileGui.display_size
display_size = round(display_size[0] * display_scale), round(display_size[1] * display_scale)
display_pad_x = 30
display_pad_y = 5
gui_min = Vector((region.width - (int(display_size[0]) + display_pad_x), display_pad_y))
gui_max = Vector((region.width - display_pad_x, (int(display_size[1]) + display_pad_y)))
self.gui_min = gui_min
self.gui_max = gui_max
reject_region = context.space_data.type != 'VIEW_3D' or region.type != 'WINDOW'
if event is None or reject_region:
ret_val = 'PASS_THROUGH'
return ret_val
if event.type == 'MIDDLEMOUSE':
context.scene.sprytile_ui.middle_btn = True
if context.scene.sprytile_ui.middle_btn and event.value == 'RELEASE':
context.scene.sprytile_ui.middle_btn = False
if mouse_pt is not None and event.type in {'MOUSEMOVE'}:
mouse_in_region = 0 <= mouse_pt.x <= region.width and 0 <= mouse_pt.y <= region.height
mouse_in_gui = gui_min.x <= mouse_pt.x <= gui_max.x and gui_min.y <= mouse_pt.y <= gui_max.y
context.scene.sprytile_ui.use_mouse = mouse_in_gui
self.prev_in_region = mouse_in_region
if context.scene.sprytile_ui.use_mouse is False:
ret_val = 'PASS_THROUGH'
return ret_val
if event.type in {'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
if event.ctrl is False:
zoom_shift = 1 if event.type == 'WHEELUPMOUSE' else -1
self.set_zoom_level(context, zoom_shift)
else:
direction = 1 if 'DOWN' in event.type else -1
bpy.ops.sprytile.grid_cycle('INVOKE_REGION_WIN', direction=direction)
self.label_counter = VIEW3D_OP_SprytileGui.label_frames
if mouse_pt is not None and event.type in {'LEFTMOUSE', 'MOUSEMOVE'}:
click_pos = Vector((mouse_pt.x - gui_min.x, mouse_pt.y - gui_min.y))
ratio_pos = Vector((click_pos.x / display_size[0], click_pos.y / display_size[1]))
tex_pos = Vector((ratio_pos.x * tex_size[0], ratio_pos.y * tex_size[1], 0))
# Apply grid matrix to tex_pos
grid_matrix = sprytile_utils.get_grid_matrix(VIEW3D_OP_SprytileGui.loaded_grid)
tex_pos = grid_matrix.inverted() @ tex_pos
grid_max = Vector((ceil(tex_size[0]/tilegrid.grid[0])-1, ceil(tex_size[1]/tilegrid.grid[1])-1))
cell_size = Vector((
tilegrid.grid[0] + (tilegrid.padding[0] * 2) + tilegrid.margin[1] + tilegrid.margin[3],
tilegrid.grid[1] + (tilegrid.padding[1] * 2) + tilegrid.margin[0] + tilegrid.margin[2]
))
grid_pos = Vector((tex_pos.x / cell_size.x, tex_pos.y / cell_size.y))
grid_pos.x = max(0, min(grid_max.x, floor(grid_pos.x)))
grid_pos.y = max(0, min(grid_max.y, floor(grid_pos.y)))
VIEW3D_OP_SprytileGui.cursor_grid_pos = grid_pos
if event.type == 'LEFTMOUSE' and event.value == 'PRESS' and VIEW3D_OP_SprytileGui.is_selecting is False:
addon_prefs = context.preferences.addons[__package__].preferences
move_mod_pressed = False
#if addon_prefs.tile_sel_move_key == 'Alt':
# move_mod_pressed = event.alt
#if addon_prefs.tile_sel_move_key == 'Ctrl':
# move_mod_pressed = event.ctrl
#if addon_prefs.tile_sel_move_key == 'Shift':
# move_mod_pressed = event.shift
VIEW3D_OP_SprytileGui.is_selecting = move_mod_pressed is False
VIEW3D_OP_SprytileGui.is_moving = move_mod_pressed is True
if VIEW3D_OP_SprytileGui.is_selecting or VIEW3D_OP_SprytileGui.is_moving:
VIEW3D_OP_SprytileGui.sel_start = grid_pos
VIEW3D_OP_SprytileGui.sel_origin = (tilegrid.tile_selection[0], tilegrid.tile_selection[1])
if VIEW3D_OP_SprytileGui.is_moving:
move_delta = Vector((grid_pos.x - VIEW3D_OP_SprytileGui.sel_start.x, grid_pos.y - VIEW3D_OP_SprytileGui.sel_start.y))
# Restrict movement inside tile grid
move_min = (VIEW3D_OP_SprytileGui.sel_origin[0] + move_delta.x,
VIEW3D_OP_SprytileGui.sel_origin[1] + move_delta.y)
if move_min[0] < 0:
move_delta.x -= move_min[0]
if move_min[1] < 0:
move_delta.y -= move_min[1]
move_max = (move_min[0] + tilegrid.tile_selection[2] - 1,
move_min[1] + tilegrid.tile_selection[3] - 1)
if move_max[0] > grid_max.x:
move_delta.x -= (move_max[0] - grid_max.x)
if move_max[1] > grid_max.y:
move_delta.y -= (move_max[1] - grid_max.y)
tilegrid.tile_selection[0] = VIEW3D_OP_SprytileGui.sel_origin[0] + move_delta.x
tilegrid.tile_selection[1] = VIEW3D_OP_SprytileGui.sel_origin[1] + move_delta.y
if VIEW3D_OP_SprytileGui.is_selecting:
sel_min = Vector((
min(grid_pos.x, VIEW3D_OP_SprytileGui.sel_start.x),
min(grid_pos.y, VIEW3D_OP_SprytileGui.sel_start.y)
))
sel_max = Vector((
max(grid_pos.x, VIEW3D_OP_SprytileGui.sel_start.x),
max(grid_pos.y, VIEW3D_OP_SprytileGui.sel_start.y)
))
tilegrid.tile_selection[0] = sel_min.x
tilegrid.tile_selection[1] = sel_min.y
tilegrid.tile_selection[2] = (sel_max.x - sel_min.x) + 1
tilegrid.tile_selection[3] = (sel_max.y - sel_min.y) + 1
do_release = event.type == 'LEFTMOUSE' and event.value == 'RELEASE'
if do_release and (VIEW3D_OP_SprytileGui.is_selecting or VIEW3D_OP_SprytileGui.is_moving):
VIEW3D_OP_SprytileGui.is_selecting = False
VIEW3D_OP_SprytileGui.is_moving = False
VIEW3D_OP_SprytileGui.sel_start = None
VIEW3D_OP_SprytileGui.sel_origin = None
        # Cycle through grids
# NLP written by GAMS Convert at 04/21/18 13:52:23
#
# Equation counts
# Total E G L N X C B
# 112 41 41 30 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 78 78 0 0 0 0 0 0
# FX 5 5 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 369 325 44 0
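#
# The script only declares the model; solving it needs a separate NLP solver.
# A typical (hypothetical) invocation, assuming IPOPT is installed, would append
#   SolverFactory('ipopt').solve(m, tee=True)
# after the last constraint definition.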
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(12.735,12.735),initialize=12.735)
m.x2 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,140),initialize=0)
m.x34 = Var(within=Reals,bounds=(0.1,0.1),initialize=0.1)
m.x35 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x36 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x37 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x38 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x39 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x40 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x41 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x42 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x43 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x44 = Var(within=Reals,bounds=(0.1,10000),initialize=0.1)
m.x45 = Var(within=Reals,bounds=(0.2,0.2),initialize=0.2)
m.x46 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x47 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x48 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x49 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x50 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x51 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x52 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x53 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x54 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x55 = Var(within=Reals,bounds=(0.2,10000),initialize=0.2)
m.x56 = Var(within=Reals,bounds=(0.01,0.01),initialize=0.01)
m.x57 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x58 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x59 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x60 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x61 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x62 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x63 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x64 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x65 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x66 = Var(within=Reals,bounds=(0.01,10000),initialize=0.01)
m.x67 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,400),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,30000),initialize=0)
m.obj = Objective(expr=m.x78, sense=minimize)
m.c1 = Constraint(expr= m.x1 + m.x12 + m.x23 >= 12.735)
m.c2 = Constraint(expr= m.x2 + m.x13 + m.x24 >= 18.523)
m.c3 = Constraint(expr= m.x3 + m.x14 + m.x25 >= 24.42)
m.c4 = Constraint(expr= m.x4 + m.x15 + m.x26 >= 30.729)
m.c5 = Constraint(expr= m.x5 + m.x16 + m.x27 >= 41.698)
m.c6 = Constraint(expr= m.x6 + m.x17 + m.x28 >= 52.802)
m.c7 = Constraint(expr= m.x7 + m.x18 + m.x29 >= 65.155)
m.c8 = Constraint(expr= m.x8 + m.x19 + m.x30 >= 81.675)
m.c9 = Constraint(expr= m.x9 + m.x20 + m.x31 >= 98.667)
m.c10 = Constraint(expr= m.x10 + m.x21 + m.x32 >= 115.501)
m.c11 = Constraint(expr= m.x11 + m.x22 + m.x33 >= 133.561)
m.c12 = Constraint(expr= - 0.744093914896725*m.x1 + m.x2 >= 0)
m.c13 = Constraint(expr= - 0.744093914896725*m.x2 + m.x3 >= 0)
m.c14 = Constraint(expr= - 0.744093914896725*m.x3 + m.x4 >= 0)
m.c15 = Constraint(expr= - 0.744093914896725*m.x4 + m.x5 >= 0)
m.c16 = Constraint(expr= - 0.744093914896725*m.x5 + m.x6 >= 0)
m.c17 = Constraint(expr= - 0.744093914896725*m.x6 + m.x7 >= 0)
m.c18 = Constraint(expr= - 0.744093914896725*m.x7 + m.x8 >= 0)
m.c19 = Constraint(expr= - 0.744093914896725*m.x8 + m.x9 >= 0)
m.c20 = Constraint(expr= - 0.744093914896725*m.x9 + m.x10 >= 0)
m.c21 = Constraint(expr= - 0.744093914896725*m.x10 + m.x11 >= 0)
m.c22 = Constraint(expr= - 0.744093914896725*m.x12 + m.x13 >= 0)
m.c23 = Constraint(expr= - 0.744093914896725*m.x13 + m.x14 >= 0)
m.c24 = Constraint(expr= - 0.744093914896725*m.x14 + m.x15 >= 0)
m.c25 = Constraint(expr= - 0.744093914896725*m.x15 + m.x16 >= 0)
m.c26 = Constraint(expr= - 0.744093914896725*m.x16 + m.x17 >= 0)
m.c27 = Constraint(expr= - 0.744093914896725*m.x17 + m.x18 >= 0)
m.c28 = Constraint(expr= - 0.744093914896725*m.x18 + m.x19 >= 0)
m.c29 = Constraint(expr= - 0.744093914896725*m.x19 + m.x20 >= 0)
m.c30 = Constraint(expr= - 0.744093914896725*m.x20 + m.x21 >= 0)
m.c31 = Constraint(expr= - 0.744093914896725*m.x21 + m.x22 >= 0)
m.c32 = Constraint(expr= - 0.744093914896725*m.x23 + m.x24 >= 0)
m.c33 = Constraint(expr= - 0.744093914896725*m.x24 + m.x25 >= 0)
m.c34 = Constraint(expr= - 0.744093914896725*m.x25 + m.x26 >= 0)
m.c35 = Constraint(expr= - 0.744093914896725*m.x26 + m.x27 >= 0)
m.c36 = Constraint(expr= - 0.744093914896725*m.x27 + m.x28 >= 0)
m.c37 = Constraint(expr= - 0.744093914896725*m.x28 + m.x29 >= 0)
m.c38 = Constraint(expr= - 0.744093914896725*m.x29 + m.x30 >= 0)
m.c39 = Constraint(expr= - 0.744093914896725*m.x30 + m.x31 >= 0)
m.c40 = Constraint(expr= - 0.744093914896725*m.x31 + m.x32 >= 0)
m.c41 = Constraint(expr= - 0.744093914896725*m.x32 + m.x33 >= 0)
m.c42 = Constraint(expr= - 4*m.x1 + m.x2 <= 0.18523)
m.c43 = Constraint(expr= - 4*m.x2 + m.x3 <= 0.2442)
m.c44 = Constraint(expr= - 4*m.x3 + m.x4 <= 0.30729)
m.c45 = Constraint(expr= - 4*m.x4 + m.x5 <= 0.41698)
m.c46 = Constraint(expr= - 4*m.x5 + m.x6 <= 0.52802)
m.c47 = Constraint(expr= - 4*m.x6 + m.x7 <= 0.65155)
m.c48 = Constraint(expr= - 4*m.x7 + m.x8 <= 0.81675)
m.c49 = Constraint(expr= - 4*m.x8 + m.x9 <= 0.98667)
m.c50 = Constraint(expr= - 4*m.x9 + m.x10 <= 1.15501)
m.c51 = Constraint(expr= - 4*m.x10 + m.x11 <= 1.33561)
m.c52 = Constraint(expr= - 4*m.x12 + m.x13 <= 0.18523)
m.c53 = Constraint(expr= - 4*m.x13 + m.x14 <= 0.2442)
m.c54 = Constraint(expr= - 4*m.x14 + m.x15 <= 0.30729)
m.c55 = Constraint(expr= - 4*m.x15 + m.x16 <= 0.41698)
m.c56 = Constraint(expr= - 4*m.x16 + m.x17 <= 0.52802)
m.c57 = Constraint(expr= - 4*m.x17 + m.x18 <= 0.65155)
m.c58 = Constraint(expr= - 4*m.x18 + m.x19 <= 0.81675)
m.c59 = Constraint(expr= - 4*m.x19 + m.x20 <= 0.98667)
m.c60 = Constraint(expr= - 4*m.x20 + m.x21 <= 1.15501)
m.c61 = Constraint(expr= - 4*m.x21 + m.x22 <= 1.33561)
m.c62 = Constraint(expr= - 4*m.x23 + m.x24 <= 0.18523)
m.c63 = Constraint(expr= - 4*m.x24 + m.x25 <= 0.2442)
m.c64 = Constraint(expr= - 4*m.x25 + m.x26 <= 0.30729)
m.c65 = Constraint(expr= - 4*m.x26 + m.x27 <= 0.41698)
m.c66 = Constraint(expr= - 4*m.x27 + m.x28 <= 0.52802)
m.c67 = Constraint(expr= - 4*m.x28 + m.x29 <= 0.65155)
m.c68 = Constraint(expr= - 4*m.x29 + m.x30 <= 0.81675)
m.c69 = Constraint(expr= - 4*m.x30 + m.x31 <= 0.98667)
m.c70 = Constraint(expr= - 4*m.x31 + m.x32 <= 1.15501)
m.c71 = Constraint(expr= - 4*m.x32 + m.x33 <= 1.33561)
m.c72 = Constraint(expr= - 5*m.x1 - 5*m.x2 - m.x34 + m.x35 == 0)
m.c73 = Constraint(expr= - 5*m.x2 - 5*m.x3 - m.x35 + m.x36 == 0)
m.c74 = Constraint(expr= - 5*m.x3 - 5*m.x4 - m.x36 + m.x37 == 0)
m.c75 = Constraint(expr= - 5*m.x4 - 5*m.x5 - m.x37 + m.x38 == 0)
m.c76 = Constraint(expr= - 5*m.x5 - 5*m.x6 - m.x38 + m.x39 == 0)
m.c77 = Constraint(expr= - 5*m.x6 - 5*m.x7 - m.x39 + m.x40 == 0)
m.c78 = Constraint(expr= - 5*m.x7 - 5*m.x8 - m.x40 + m.x41 == 0)
m.c79 = Constraint(expr= - 5*m.x8 - 5*m.x9 - m.x41 + m.x42 == 0)
m.c80 = Constraint(expr= - 5*m.x9 - 5*m.x10 - m.x42 + m.x43 == 0)
m.c81 = Constraint(expr= - 5*m.x10 - 5*m.x11 - m.x43 + m.x44 == 0)
m.c82 = Constraint(expr= - 5*m.x12 - 5*m.x13 - m.x45 + m.x46 == 0)
m.c83 = Constraint(expr= - 5*m.x13 - 5*m.x14 - m.x46 + m.x47 == 0)
m.c84 = Constraint(expr= - 5*m.x14 - 5*m.x15 - m.x47 + m.x48 == 0)
m.c85 = Constraint(expr= - 5*m.x15 - 5*m.x16 - m.x48 + m.x49 == 0)
m.c86 = Constraint(expr= - 5*m.x16 - 5*m.x17 - m.x49 + m.x50 == 0)
m.c87 = Constraint(expr= - 5*m.x17 - 5*m.x18 - m.x50 + m.x51 == 0)
m.c88 = Constraint(expr= - 5*m.x18 - 5*m.x19 - m.x51 + m.x52 == 0)
m.c89 = Constraint(expr= - 5*m.x19 - 5*m.x20 - m.x52 + m.x53 == 0)
m.c90 = Constraint(expr= - 5*m.x20 - 5*m.x21 - m.x53 + m.x54 == 0)
m.c91 = Constraint(expr= - 5*m.x21 - 5*m.x22 - m.x54 + m.x55 == 0)
m.c92 = Constraint(expr= - 5*m.x23 - 5*m.x24 - m.x56 + m.x57 == 0)
m.c93 = Constraint(expr= - 5*m.x24 - 5*m.x25 - m.x57 + m.x58 == 0)
m.c94 = Constraint(expr= - 5*m.x25 - 5*m.x26 - m.x58 + m.x59 == 0)
m.c95 = Constraint(expr= - 5*m.x26 - 5*m.x27 - m.x59 + m.x60 == 0)
m.c96 = Constraint(expr= - 5*m.x27 - 5*m.x28 - m.x60 + m.x61 == 0)
m.c97 = Constraint(expr= - 5*m.x28 - 5*m.x29 - m.x61 + m.x62 == 0)
m.c98 = Constraint(expr= - 5*m.x29 - 5*m.x30 - m.x62 + m.x63 == 0)
m.c99 = Constraint(expr= - 5*m.x30 - 5*m.x31 - m.x63 + m.x64 == 0)
m.c100 = Constraint(expr= - 5*m.x31 - 5*m.x32 - m.x64 + m.x65 == 0)
m.c101 = Constraint(expr= - 5*m.x32 - 5*m.x33 - m.x65 + m.x66 == 0)
m.c102 = Constraint(expr= - 0.850412249705536*m.x1 - 0.850412249705536*m.x2 - m.x67 + m.x68 == 0)
m.c103 = Constraint(expr= - 0.850412249705536*m.x2 - 0.850412249705536*m.x3 - m.x68 + m.x69 == 0)
m.c104 = Constraint(expr= - 0.850412249705536*m.x3 - 0.850412249705536*m.x4 - m.x69 + m.x70 == 0)
m.c105 = Constraint(expr= - 0.850412249705536*m.x4 - 0.850412249705536*m.x5 - m.x70 + m.x71 == 0)
m.c106 = Constraint(expr= - 0.850412249705536*m.x5 - 0.850412249705536*m.x6 - m.x71 + m.x72 == 0)
m.c107 = Constraint(expr= - 0.850412249705536*m.x6 - 0.850412249705536*m.x7 - m.x72 + m.x73 == 0)
m.c108 = Constraint(expr= - 0.850412249705536*m.x7 - 0.850412249705536*m.x8 - m.x73 + m.x74 == 0)
m.c109 = Constraint(expr= - 0.850412249705536*m.x8 - 0.850412249705536*m.x9 - m.x74 + m.x75 == 0)
m.c110 = Constraint(expr= - 0.850412249705536*m.x9 - 0.850412249705536*m.x10 - m.x75 + m.x76 == 0)
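# A hedged usage sketch: a model of this form is typically handed to an NLP
# solver through Pyomo's SolverFactory.  The solver name 'ipopt' is an
# assumption -- substitute whichever solver is installed and available.
if __name__ == '__main__':
    opt = SolverFactory('ipopt')  # assumed solver; any NLP-capable solver works
    opt.solve(m, tee=True)
    print('objective value:', value(m.obj))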
'861847564':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861847565':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861847566':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861847567':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861847560':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861847561':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861847562':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861847563':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86185688':{'en': 'Anyang, Henan', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'86185689':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861867839':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'86185684':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'86185685':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'86185680':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'86186703':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861867225':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861839428':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861839429':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861867224':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861839424':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861839425':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861839426':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861839427':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861839420':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861839421':{'en': 'Tianshui, Gansu', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861839422':{'en': 'Tianshui, Gansu', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861839423':{'en': 'Tianshui, Gansu', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861867226':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861867221':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861867220':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861867223':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861867222':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861867835':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'86186705':{'en': 'Chenzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861867837':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'861848913':{'en': 'Shannan, Tibet', 'zh': u('\u897f\u85cf\u5c71\u5357\u5730\u533a')},
'861867229':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861867228':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'86186706':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861860949':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861860948':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'86186707':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861846938':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861846939':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'86184810':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'86184811':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'86184816':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'86184817':{'en': 'Nanchong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'86184815':{'en': 'Liangshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u51c9\u5c71\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861846930':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861846931':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861846932':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861846933':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861846934':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861846935':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861846936':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861846937':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861860941':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u7518\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861838779':{'en': 'Yuxi, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861838778':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861850656':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861850657':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861850654':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861850655':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861850652':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861850653':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861850650':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861850651':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861853301':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853300':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853303':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853302':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853305':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853304':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853307':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861853306':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861853309':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861853308':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861853655':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'861853654':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861853653':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')},
'861853652':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861853651':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861853650':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'861861397':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861861396':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861861395':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861861394':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861861393':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'861861392':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u6e58\u897f\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861861391':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861861390':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861861399':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861861398':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861847811':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861855910':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861847814':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856085':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856084':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856087':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856086':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856081':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856080':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861856083':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856082':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856089':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861856088':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861867706':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861862987':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861867704':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')},
'861867705':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861867702':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')},
'861867703':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')},
'861867700':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')},
'861862986':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861862985':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861867708':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861867709':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861862984':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861862983':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861847816':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u76ca\u9633\u5e02')},
'861862982':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861862981':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861862980':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u8fbd\u6e90\u5e02')},
'861843418':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'861843419':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'861859242':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861859240':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861859241':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861843410':{'en': 'Jincheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')},
'861843411':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861843412':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861843413':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861843414':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861843415':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861843416':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861843417':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')},
'861866290':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'86186503':{'en': 'Fu<NAME>ian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86186501':{'en': 'X<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86186507':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861865576':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861862860':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861862861':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861862862':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861862863':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861862864':{'en': 'Han<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861862865':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861862866':{'en': 'Y<NAME>', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861862867':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861862868':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861862869':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861838908':{'en': 'Xigaze, Tibet', 'zh': u('\u897f\u85cf\u65e5\u5580\u5219\u5730\u533a')},
'861838909':{'en': '<NAME>', 'zh': u('\u897f\u85cf\u5c71\u5357\u5730\u533a')},
'861858306':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861858307':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861858300':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861858301':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861858302':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861858303':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861838900':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861838901':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861838902':{'en': 'Xigaze, Tibet', 'zh': u('\u897f\u85cf\u65e5\u5580\u5219\u5730\u533a')},
'861838903':{'en': 'Shannan, Tibet', 'zh': u('\u897f\u85cf\u5c71\u5357\u5730\u533a')},
'861838904':{'en': 'Nyingchi, Tibet', 'zh': u('\u897f\u85cf\u6797\u829d\u5730\u533a')},
'861838905':{'en': 'Qamdo, Tibet', 'zh': u('\u897f\u85cf\u660c\u90fd\u5730\u533a')},
'861838906':{'en': 'Nagqu, Tibet', 'zh': u('\u897f\u85cf\u90a3\u66f2\u5730\u533a')},
'861838907':{'en': 'Qamdo, Tibet', 'zh': u('\u897f\u85cf\u660c\u90fd\u5730\u533a')},
'861840499':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861840498':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861840491':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')},
'861840490':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')},
'861840493':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861840492':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')},
'861840495':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861840494':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861840497':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861840496':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861844936':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861844937':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861844934':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861844935':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861844932':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861844933':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861844930':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861844931':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861844938':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861844939':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861860259':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861860258':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861860709':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861860708':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861840729':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861840728':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861840727':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861840726':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861840725':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861840724':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861840723':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861840722':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861840721':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861840720':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861855149':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861855148':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861855141':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861855140':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861855143':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861855142':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861855145':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861855144':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861855147':{'en': 'Ta<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861855146':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861850519':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861850518':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861850511':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861850510':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861850513':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861850512':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861850515':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861850514':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861850517':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861850516':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'86185352':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'86185353':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')},
'86185350':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'86185351':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'86185356':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')},
'86185357':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'86185354':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'86185355':{'en': 'Changzhi, Shanxi', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'86185358':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'86185359':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861862859':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861862858':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'86184599':{'en': 'Nanping, Fujian', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'86184598':{'en': 'Sanming, Fujian', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'86184593':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'86184592':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86184591':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86184590':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'86184597':{'en': 'Longyan, Fujian', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'86184596':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'86184595':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'86184594':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861862856':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'86186335':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861840334':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'86186331':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'86186330':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86186333':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861840335':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861840336':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861856764':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861856765':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861856766':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861856767':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861856760':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861856761':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861856762':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861856763':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861840330':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861856768':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861856769':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861840331':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861862679':{'en': 'Baicheng, Jilin', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')},
'861840332':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861862675':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861862674':{'en': 'Jilin, Jilin', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861862677':{'en': 'Baicheng, Jilin', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')},
'861840333':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861862671':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'861862670':{'en': 'Changchun, Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'861862673':{'en': 'Jilin, Jilin', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861862672':{'en': 'Changchun, Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'861866709':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861866708':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861866705':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861866704':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861866707':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
# STL imports
import asyncio
import json
import logging
import sys
# Package imports
import aiohttp
import async_timeout
import numpy as np
import requests
from tqdm import tqdm, trange  # Progress bar
# Local imports
import fbd.tools
from fbd.storage import Storage
class Gatherer:
# TODO: Move to numpy arrays / DFs?
# TODO: Store the already processed points as a table in a db for faster
# --get-places
def __init__(self, client_id, client_secret, storage=None, logger=None):
if not logger:
logging.basicConfig(level=logging.INFO)
            logging.info('Gatherer: Didn\'t receive a custom logger, '
                         'so falling back to the default one')
self.logger = logging
else:
self.logger = logger
self.logger.debug('Gatherer: Using logger {0}'.format(logger))
self.logger.debug('Gatherer: Started initialization')
self.client_id = client_id
self.client_secret = client_secret
self.logger.debug('Gatherer: Getting the token')
token_params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'client_credentials'
}
self.token = requests.get(
'https://graph.facebook.com/v2.9/oauth/access_token?',
params=token_params).json()['access_token']
self.logger.debug('Gatherer: Initialized')
self.storage = storage
self.PLACE_ID_DETAILS_URL = ('https://graph.facebook.com/v2.9/{}'
'?fields=id,name,place_type,place_topics,'
'cover.fields(id,source),picture.type(large),'
f'location&access_token={self.token}')
        self.PLACE_LAT_LON_RADIUS_URL = ('https://graph.facebook.com/v2.9/'
                                         'search?type=place&q="*"&center={},{}'
                                         '&distance={}&fields=id&'
                                         f'access_token={self.token}')
@staticmethod
def _clean_url(url):
if url.startswith('http://web.'):
url = url[:7] + url[11:]
elif url.startswith('https://web.'):
url = url[:8] + url[12:]
return url
@staticmethod
def _response_to_post(post, page_id):
return {
'id': post['id'],
'page_id': page_id,
'message': post['message'],
'created_time': post['created_time'],
'link': post['link'],
'like': post['like']['summary']['total_count'],
'love': post['love']['summary']['total_count'],
'haha': post['haha']['summary']['total_count'],
'wow': post['wow']['summary']['total_count'],
'sad': post['sad']['summary']['total_count'],
'angry': post['angry']['summary']['total_count'],
'thankful': post['thankful']['summary']['total_count'],
}
# Generator
@staticmethod
def _generate_points(radius, circle_radius, center_point_lat,
center_point_lng):
# Defining the general square bounds
top = center_point_lat + fbd.tools.lat_from_met(radius)
bottom = center_point_lat - fbd.tools.lat_from_met(radius)
left = center_point_lng - fbd.tools.lon_from_met(radius)
right = center_point_lng + fbd.tools.lon_from_met(radius)
circle_step = (fbd.tools.lat_from_met(circle_radius),
fbd.tools.lon_from_met(circle_radius))
lat = top
lng = left
# Iterating by small circles from top->bottom from left->right
while lat >= bottom:
while lng <= right:
yield lat, lng
lng += circle_step[1]
lng = left
lat -= circle_step[0]
@staticmethod
def _num_iters(radius, circle_radius, center_point_lat, center_point_lng):
# Exhaust the _generate_points generator and count the # circles
return len([
x
for x, _ in Gatherer._generate_points(
radius, circle_radius, center_point_lat, center_point_lng)
])
def _exit(self):
self.logger.info('Gatherer - _exit: EXITING APPLICATION')
sys.exit(0)
def get_place_from_id(self, place_id, save_storage=True):
if not self.storage and save_storage:
raise Exception('Gatherer: get_place_from_id - '
'storage wasn\'t defined')
self.logger.debug(
'Gatherer: Get place request, id={0}'.format(place_id))
params = {
'ids': place_id,
'fields': 'id,name,place_type,place_topics,cover.fields(id,source),'
'picture.type(large),location',
'access_token': self.token
}
place = requests.get('https://graph.facebook.com/v2.9/',
params=params).json()[place_id]
if save_storage:
self.storage.update_place(place)
return place
@staticmethod
async def get_json(url, session, sem, params=None, timeout=15):
async with sem:
with async_timeout.timeout(timeout):
async with session.get(url, params=params) as response:
return json.loads(await response.text())
@staticmethod
async def get_text(url, session, sem, params=None, timeout=15):
async with sem:
with async_timeout.timeout(timeout):
async with session.get(url, params=params) as response:
return await response.text()
@staticmethod
async def get_links_list(links, json=True, max_concurrent=3, desc=None):
sem = asyncio.Semaphore(max_concurrent)
async with aiohttp.ClientSession() as session:
tasks = [
asyncio.ensure_future(
Gatherer.get_json(link, session, sem)
if json else Gatherer.get_text(link, session, sem))
for link in links
]
responses = [
await resp
for resp in tqdm(
asyncio.as_completed(tasks),
desc=desc,
total=len(tasks),
)
]
return responses
@staticmethod
async def get_links(links, json=True, max_concurrent=3, desc=None):
sem = asyncio.Semaphore(max_concurrent)
async with aiohttp.ClientSession() as session:
tasks = [
asyncio.ensure_future(
Gatherer.get_json(link, session, sem)
if json else Gatherer.get_text(link, session, sem))
for link in links
]
for resp in tqdm(
asyncio.as_completed(tasks),
desc=desc,
total=len(tasks),
):
yield await resp
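    # A hedged usage sketch for the two coroutine helpers above; the URL is a
    # placeholder, not an endpoint used elsewhere in this class:
    #
    #   links = ['https://graph.facebook.com/v2.9/me?access_token=TOKEN']
    #   loop = asyncio.new_event_loop()
    #   responses = loop.run_until_complete(
    #       Gatherer.get_links_list(links, json=True, max_concurrent=3))
    #   loop.close()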
async def _get_place_ids_point(self, lat, lon, circle_radius, session, sem):
# Getting the pages from graph api
id_list = []
response = await self.get_json(
self.PLACE_LAT_LON_RADIUS_URL.format(lat, lon, circle_radius),
session, sem
)
# Quick list comprehension to extract the IDs
place_id_list = [i.get('id') for i in response.get('data', [{}])]
for id_ in place_id_list:
if id_:
id_list.append(id_)
next_page = 'paging' in response and 'next' in response['paging']
# There are multiple pages in the response
while next_page:
response = await self.get_json(response['paging']['next'], session, sem)
for place in response['data']:
id_ = place.get('id')
if id_:
id_list.append(id_)
next_page = 'paging' in response and 'next' in response['paging']
        # Always return a list (possibly empty) so callers can iterate it directly
        return id_list
async def _process_saving_places(self, fetch_tasks, save_storage,
loop, session, sem, block_id):
        self.logger.debug(f'_process_saving_places - ftasks={len(fetch_tasks)}, '
                          f'block id = {block_id}')
places_details_tasks = []
places = []
for place_ids in tqdm(asyncio.as_completed(fetch_tasks),
total=len(fetch_tasks), file=sys.stdout,
desc=f'[Block {block_id}] Processing points'):
for pid in await place_ids:
places_details_tasks.append(asyncio.ensure_future(
self.get_json(
self.PLACE_ID_DETAILS_URL.format(pid), session, sem
)))
for place_details in tqdm(asyncio.as_completed(places_details_tasks),
total=len(places_details_tasks),
file=sys.stdout,
desc=f'[Block {block_id}] Processing place details'):
places.append(await place_details)
if save_storage:
return asyncio.ensure_future(
loop.run_in_executor(
None, self.storage.save_placelist, places)
)
else:
return places
async def _get_places_loc(self, circle_radius, city, radius, loop,
save_storage, max_concurrent, block_size):
self.logger.debug('_get_places_loc - starting')
sem = asyncio.Semaphore(max_concurrent)
city_coords = fbd.tools.get_coords(city)
# num_iters = self._num_iters(radius, circle_radius, *city_coords)
fetch_tasks = []
save_outs = []
block_id = 0
async with aiohttp.ClientSession() as session:
for i, coords in enumerate(
self._generate_points(radius, circle_radius, *city_coords)
):
fetch_tasks.append(
asyncio.ensure_future(
self._get_place_ids_point(
*coords, circle_radius, session, sem)
)
)
if (i + 1) % block_size == 0:
block_id += 1
save_outs.append(
self._process_saving_places(
fetch_tasks, save_storage, loop,
session, sem, block_id)
)
fetch_tasks = []
else:
if fetch_tasks:
block_id += 1
save_outs.append(
self._process_saving_places(fetch_tasks, save_storage,
loop, session, sem, block_id)
)
fetch_tasks = []
res = await asyncio.gather(*save_outs)
if save_storage:
for task in tqdm(asyncio.as_completed(res),
total=len(res),
file=sys.stdout,
desc='Saving the results'):
await task
else:
            if isinstance(res[0], list):
return [item for subarray in res for item in subarray]
else:
return res
def get_places_loc(self, circle_radius, city, radius, save_storage=True,
max_concurrent=3, block_size=3):
if not self.storage and save_storage:
raise Exception('Gatherer: get_places_loc - '
'storage wasn\'t defined')
# ASYNC
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
res = loop.run_until_complete(
self._get_places_loc(circle_radius, city, radius, loop,
save_storage, max_concurrent, block_size)
)
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
return res
def _get_events_from_place_id_syn(self, place_id):
# Getting the pages from graph api
req_string = (
'https://graph.facebook.com/v2.9/{}?fields=events{{id,name,'
'start_time,description,place,type,category,ticket_uri,'
'cover.fields(id,source),picture.type(large),attending_count,'
'declined_count,maybe_count,noreply_count}}&access_token={}'
).format(place_id, self.token)
        response = requests.get(req_string).json()
        events = response.get('events', {})
        event_list = list(events.get('data', []))
        # There may be multiple pages in the events connection; after the first
        # hop the paged response has 'data'/'paging' at its top level.
        while events.get('paging', {}).get('next'):
            events = requests.get(events['paging']['next']).json()
            event_list.extend(events.get('data', []))
        return [place_id, event_list]
async def _get_events_from_places(self, loop, place_ids):
tasks = [
loop.run_in_executor(None, self._get_events_from_place_id_syn,
place_id) for place_id in place_ids
]
events = []
for pid_elist in tqdm(
asyncio.as_completed(tasks), total=len(place_ids),
desc='Getting events per place', unit='place', file=sys.stdout):
pelist = await pid_elist
for event in tqdm(pelist[1], desc='Processing place events',
unit='events', file=sys.stdout):
event['place_id'] = pelist[0]
events.append(event)
return events
def get_events_from_places(self, save_storage=True):
if not self.storage:
raise Exception('Gatherer: get_events_from_places - '
'storage wasn\'t defined')
self.logger.debug('Gatherer: get_events_from_places request')
place_ids = self.storage.get_all_place_ids()
loop = asyncio.get_event_loop()
events = loop.run_until_complete(
asyncio.ensure_future(
self._get_events_from_places(loop, place_ids)
)
)
loop.close()
if save_storage:
for e in tqdm(events, desc='Saving events', leave=False,
file=sys.stdout, unit='event'):
self.storage.save_event(e)
return events
async def _update_places(self, place_ids, max_concurrent=3):
places = []
sem = asyncio.Semaphore(max_concurrent)
# TODO: make those urls into classwide constants
url = ('https://graph.facebook.com/v2.9/{0}?fields=id,name,'
'place_type,place_topics,cover.fields(id,source),'
'picture.type(large),location&access_token={1}')
async with aiohttp.ClientSession() as session:
tasks = [
Gatherer.get_json(url.format(pid, self.token), session, sem)
for pid in place_ids
]
for place in tqdm(
asyncio.as_completed(tasks), total=len(place_ids),
desc='Updating places', unit='place', file=sys.stdout):
places.append(await place)
return places
def update_places(self, max_concurrent=3):
if not self.storage:
raise Exception('Gatherer: update_places - '
'storage wasn\'t defined')
place_ids = self.storage.get_all_place_ids()
loop = asyncio.get_event_loop()
places = loop.run_until_complete(
asyncio.ensure_future(
self._update_places(place_ids, max_concurrent)))
loop.close()
for p in tqdm(places, desc='Saving places', leave=False,
file=sys.stdout, unit='place'):
self.storage.update_place(p)
def get_page(self, page_id, get_posts=True):
# id,name,about,category,fan_count
request_str = ('https://graph.facebook.com/v2.9/{}'
'?fields=id,name,about,category,fan_count'
'&access_token={}')
page = requests.get(request_str.format(page_id, self.token)).json()
self.storage.save_page(page)
if get_posts:
for post in self.get_posts(page['id']):
self.storage.save_post(post)
def get_page_id(self, url):
url = Gatherer._clean_url(url)
request_str = 'https://graph.facebook.com/v2.9/?id={}&access_token={}'
response = requests.get(request_str.format(url, self.token)).json()
return response['id']
def get_posts(self, page_id, limit=100):
# nytimes?fields=posts{link,message,id,created_time}
request_str = (
'https://graph.facebook.com/v2.9/{}'
'?fields=posts{{'
'link, message, id, created_time,'
'reactions.type(LIKE).limit(0).summary(total_count).as(like),'
'reactions.type(LOVE).limit(0).summary(total_count).as(love),'
'reactions.type(HAHA).limit(0).summary(total_count).as(haha),'
'reactions.type(WOW).limit(0).summary(total_count).as(wow),'
'reactions.type(SAD).limit(0).summary(total_count).as(sad),'
'reactions.type(ANGRY).limit(0).summary(total_count).as(angry),'
'reactions.type(THANKFUL).limit(0).summary(total_count)'
'.as(thankful)}}&access_token={}')
# print(request_str.format(page_id, self.token))
response = requests.get(
request_str.format(page_id, self.token)).json()['posts']
        posts = []
        # Iterate the first page of results, then follow 'paging.next' links
        # until the requested number of posts is collected or no pages remain.
        while True:
            for post in response['data']:
                posts.append(Gatherer._response_to_post(post, page_id))
                if len(posts) >= limit:
                    return posts
            if 'paging' in response and 'next' in response['paging']:
                response = requests.get(response['paging']['next']).json()
            else:
                break
        return posts
def get_post_reactions(self, post_id):
request_str = (
'https://graph.facebook.com/v2.9/{}?fields='
'reactions.type(LIKE).limit(0).summary(total_count).as(like),'
'reactions.type(LOVE).limit(0).summary(total_count).as(love),'
'reactions.type(HAHA).limit(0).summary(total_count).as(haha),'
'reactions.type(WOW).limit(0).summary(total_count).as(wow),'
'reactions.type(SAD).limit(0).summary(total_count).as(sad),'
'reactions.type(ANGRY).limit(0).summary(total_count).as(angry),'
'reactions.type(THANKFUL).limit(0).summary(total_count)'
'.as(thankful)&access_token={}')
response = requests.get(request_str.format(post_id, self.token)).json()
del response['id']
return {
item: response[item]['summary']['total_count']
for item in response
}
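# A hedged usage sketch for the page/post helpers above; the page URL and the
# post id are placeholders:
#
#   page_id = gatherer.get_page_id('https://www.facebook.com/nytimes')
#   gatherer.get_page(page_id, get_posts=True)
#   reactions = gatherer.get_post_reactions('<post_id>')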
if __name__ == '__main__':
config = {
'storage_url': 'sqlite:///fbd/db/fb.sqlite',
'verbose': False,
'update_places': False,
'update_events': False,
}
# Configuring the logger
if config['verbose']:
logging.basicConfig(level=logging.DEBUG)
log = logging
else:
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
if config['storage_url']:
storage = Storage(db_url=config['storage_url'])
else:
storage = Storage()
with open('fbd/config.json', 'r') as f:
params = json.load(f)
print(params)
gatherer = Gatherer(params['client_id'], params['client_secret'],
storage=storage, logger=log)
import time
from pprint import pprint
results = []
for max_concurrent in [1, 3, 5, 10, 500]:
for block_size in [1, 3, 10, 20]:
start = time.time()
np.imag(ydata), color='k')
elif component == 'both':
plt.plot(xdata, np.real(ydata), color='k', label='Re')
plt.plot(xdata, np.imag(ydata), color='#808080', label='Im')
plt.legend()
else:
msg = f'{R}component was not given a valid value' \
+ f' (should be \'real\', \'imag\' or \'both\').{END}'
raise ValueError(msg)
if domain == 'frequency':
plt.xlim(xdata[0], xdata[-1])
plt.xlabel(xlabel)
plt.show()
# TODO
# make_fid:
# include functionality to write to Bruker files, Varian files,
# JEOL files etc
def make_fid(self, n=None, oscillators=None, kill=True):
"""Constructs a synthetic FID using a parameter estimate and
experiment parameters.
Parameters
----------
        n : [int], or [int, int], or None, default: None
            The number of points to construct the FID with in each dimension.
If `None`, :py:meth:`get_n` will be used, meaning the signal will
have the same number of points as the original data.
oscillators : None or list, default: None
Which oscillators to include in result. If `None`, all
oscillators will be included. If a list of ints, the subset of
oscillators corresponding to these indices will be used. Note
that all elements should be in ``range(self.result.shape[0])``.
kill : bool, default: True
If `self.result` is `None`, `kill` specifies how the method will
act:
* If `True`, an AttributeIsNoneError is raised.
* If `False`, `None` is returned.
Returns
-------
fid : numpy.ndarray
The generated FID.
tp : [numpy.ndarray] or [numpy.ndarray, numpy.ndarray]
The time-points at which the signal is sampled, in each dimension.
See Also
--------
:py:func:`nmrespy.sig.make_fid`
"""
result = self.get_result(kill=kill)
if oscillators is None:
oscillators = list(range(result.shape[0]))
if n is None:
n = self.get_n()
ArgumentChecker(
[
(n, 'n', 'int_list'),
(oscillators, 'oscillators', 'int_list'),
],
dim=self.get_dim(),
)
return sig.make_fid(result[[oscillators]], n, self.get_sw(),
offset=self.get_offset())
@logger
def phase_data(self, p0=None, p1=None):
"""Phase `self.data`
Parameters
----------
        p0 : [float], [float, float], or None, default: None
Zero-order phase correction in each dimension in radians.
If `None`, the phase will be set to `0.0` in each dimension.
        p1 : [float], [float, float], or None, default: None
First-order phase correction in each dimension in radians.
If `None`, the phase will be set to `0.0` in each dimension.
"""
if p0 is None:
p0 = self.get_dim() * [0.0]
if p1 is None:
p1 = self.get_dim() * [0.0]
self.data = sig.ift(
sig.phase(sig.ft(self.data), p0, p1)
)
def manual_phase_data(self, max_p1=None):
"""Perform manual phase correction of `self.data`.
        Zero- and first-order phase parameters are determined via
interaction with a Tkinter- and matplotlib-based graphical user
interface.
Parameters
----------
max_p1 : float or None, default: None
Specifies the range of first-order phases permitted. For each
dimension, the user will be allowed to choose a value of `p1`
within [`-max_p1`, `max_p1`]. By default, `max_p1` will be
``10 * numpy.pi``.
"""
p0, p1 = sig.manual_phase_spectrum(sig.ft(self.data), max_p1)
if not (p0 is None and p1 is None):
self.phase_data(p0=[p0], p1=[p1])
@logger
def frequency_filter(
self, region, noise_region, cut=True, cut_ratio=3.0, region_unit='ppm',
):
"""Generates frequency-filtered data from `self.data`.
Parameters
----------
region: [[int, int]], [[int, int], [int, int]], [[float, float]] or\
[[float, float], [float, float]]
Cut-off points of the spectral region to consider.
If the signal is 1D, this should be of the form `[[a,b]]`
where `a` and `b` are the boundaries.
If the signal is 2D, this should be of the form
`[[a,b], [c,d]]` where `a` and `b` are the boundaries in
dimension 1, and `c` and `d` are the boundaries in
dimension 2. The ordering of the bounds in each dimension is
not important.
noise_region: [[int, int]], [[int, int], [int, int]],\
[[float, float]] or [[float, float], [float, float]]
Cut-off points of the spectral region to extract the spectrum's
noise variance. This should have the same structure as `region`.
cut : bool, default: True
If `False`, the filtered signal will comprise the same number of
data points as the original data. If `True`, prior to inverse
FT, the data will be sliced, with points not in the region
specified by `cut_ratio` being removed.
        cut_ratio : float, default: 3.0
            If cut is `True`, defines the ratio between the cut signal's sweep
            width and the region width, in each dimension.
            It is recommended that this is comfortably larger than `1.0`.
`2.0` or higher should be appropriate.
region_unit : 'ppm', 'hz' or 'idx', default: 'ppm'
The unit the elements of `region` and `noise_region` are
expressed in.
Notes
-----
This method assigns the attribute `filter_info` to an instance of
:py:class:`nmrespy.freqfilter.FrequencyFilter`. To obtain information
on the filtration, use :py:meth:`get_filter_info`.
"""
self.filter_info = FrequencyFilter(
self.get_data(), region, noise_region, region_unit=region_unit,
sw=self.get_sw(), offset=self.get_offset(),
sfo=self.get_sfo(kill=True), cut=cut, cut_ratio=cut_ratio,
)
def get_filter_info(self, kill=True):
"""Returns information relating to frequency filtration.
Parameters
----------
kill : bool, default: True
If `filter_info` is `None`, and `kill` is `True`, an error will
be raised. If `kill` is False, `None` will be returned.
Returns
-------
filter_info : nmrespy.freqfilter.FrequencyFilter
Notes
-----
There are numerous methods associated with `filter_info` for
        obtaining relevant information about the filtration. See
:py:class:`nmrespy.freqfilter.FrequencyFilter` for details.
"""
return self._check_if_none(
'filter_info', kill, method='frequency_filter'
)
def _get_data_sw_offset(self):
"""Retrieve data, sweep width and offset, based on whether
        frequency filtration has been applied.
Returns
-------
data : numpy.ndarray
sw : [float] or [float, float]
Sweep width (Hz).
offset : [float] or [float, float]
Transmitter offset (Hz).
Notes
-----
* If `self.filter_info` is equal to `None`, `self.data` will be
analysed
* If `self.filter_info` is an instance of
:py:class:`nmrespy.freqfilter.FrequencyFilter`,
`self.filter_info.filtered_signal` will be analysed.
"""
if self.filter_info is not None:
data = self.filter_info.get_fid()
sw = self.filter_info.get_sw()
offset = self.filter_info.get_offset()
else:
data = self.get_data()
sw = self.get_sw()
offset = self.get_offset()
return data, sw, offset
@logger
def matrix_pencil(self, M=0, trim=None, fprint=True):
"""Implementation of the 1D Matrix Pencil Method [#]_ [#]_ or 2D
        Modified Matrix Enhancement and Matrix Pencil (MMEMP) method [#]_
        [#]_ with the option of Model Order Selection using the Minimum
        Description Length (MDL) [#]_.
Parameters
----------
M : int, default: 0
The number of oscillators to use in generating a parameter
estimate. If `M` is set to `0`, the number of oscillators will be
estimated using the MDL.
trim : [int], [int, int], or None, default: None
If `trim` is a list, the analysed data will be sliced such that
its shape matches `trim`, with the initial points in the signal
being retained. If `trim` is `None`, the data will not be
sliced. Consider using this in cases where the full signal is
large, such that the method takes a very long time, or your PC
has insufficient memory to process it.
fprint : bool, default: True
If `True` (default), the method provides information on
progress to the terminal as it runs. If `False`, the method
will run silently.
Notes
-----
The data analysed will be the following:
* If `self.filter_info` is equal to `None`, `self.data` will be
analysed
* If `self.filter_info` is an instance of
:py:class:`nmrespy.freqfilter.FrequencyFilter`,
`self.filter_info.filtered_signal` will be analysed.
**For developers:** See :py:meth:`_get_data_sw_offset`
        Upon successful completion of this method, `self.mpm_info` will
be updated with an instance of :py:class:`nmrespy.mpm.MatrixPencil`.
References
----------
.. [#] <NAME> and <NAME>. “Matrix pencil method for
estimating parameters of exponentially damped/undamped sinusoids
in noise”. In: IEEE Trans. Acoust., Speech, Signal Process. 38.5
(1990), pp. 814–824.
.. [#] <NAME> et al. “A novel detection–estimation scheme for
noisy NMR signals: applications to delayed acquisition data”.
In: <NAME>. Reson. 128.1 (1997), pp. 30–41.
.. [#] <NAME>. “Estimating two-dimensional frequencies by matrix
enhancement and matrix pencil”. In: [Proceedings] ICASSP 91: 1991
International Conference on Acoustics, Speech, and Signal
Processing. IEEE. 1991, pp. 3073–3076.
.. [#] <NAME> et al. “Estimation of two-dimensional
frequencies using modified matrix pencil method”. In: IEEE Trans.
Signal Process. 55.2 (2007), pp. 718–724.
.. [#] <NAME>, <NAME>, Detection of signals by information
theoretic criteria, IEEE Transactions on Acoustics, Speech, and
Signal Processing 33 (2) (1985) 387–392.
"""
data, sw, offset = self._get_data_sw_offset()
if trim is None:
trim = [s for s in data.shape]
ArgumentChecker([(trim, 'trim', 'int_list')], dim=self.dim)
trim = tuple(np.s_[0:t] for t in trim)
# Slice data
data = data[trim]
mpm_info = MatrixPencil(
data, sw, offset, self.sfo, M, fprint
)
self.result = mpm_info.get_result()
self.errors = None
self._saveable = False
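    # A hedged sketch of the filter-then-estimate workflow described by the
    # docstrings above; the region bounds and trim length are illustrative
    # values, and `estimator` stands for an instance of this class:
    #
    #   estimator.frequency_filter([[5.5, 4.5]], [[10.0, 9.5]], cut=True)
    #   estimator.matrix_pencil(M=0, trim=[4096], fprint=True)
    #   fid, tp = estimator.make_fid()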
    # TODO:
import argparse
import logging
import os
import time
import torch
import transformers
from tensorboardX import SummaryWriter
from tqdm import tqdm
from model import SpanDomainModel, CRFModel, SoftmaxModel
from data_processor import SpanProcessor, CRFProcessor, SoftMaxProcessor
from utils import set_seed, calculate
# Evaluation procedure: prints the F1 score for each entity type and the overall F1 score
def evaluate(data, tags_list, title, mode, test_flag=False):
def change_label_span(start_tags, end_tags, length):
i = 0
result = []
while i < length - 1:
if start_tags[i] != 0:
tag = start_tags[i]
start_index = i
while i < length and end_tags[i] == 0:
i += 1
if i < length and end_tags[i] == tag:
result.append((start_index, i + 1, tag.item()))
i += 1
return result
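# Worked example (hypothetical tensors): with start_tags = [0, 2, 0, 0],
# end_tags = [0, 0, 2, 0] and length = 4, the loop finds a span starting at
# index 1 whose matching end tag (2) is at index 2, so it returns [(1, 3, 2)],
# i.e. tokens 1-2 form a single entity of type 2.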
result_f1 = None
result_dict = {}
if mode == 'span':
domain_entities_dict = {x + 1: [0, 0, 0] for x in range(len(tags_list))}
logging.info("***** {} Evaluation *****".format(title))
for domain, domain_data in data.items():
outputs = domain_data['outputs']
labels = domain_data['labels']
mask_ids = domain_data["mask_ids"]
sentence_num = outputs['num']
if mode == 'span':
entities_dict = {x + 1: [0, 0, 0] for x in range(len(tags_list))}
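# entities_dict maps each tag id to [num_predicted, num_gold, num_correct];
# precision, recall and F1 are later derived from these three counts via calculate().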
result_list = []
for i in range(sentence_num):
if mode == 'span':
length = mask_ids[i].sum()
predict_list = change_label_span(outputs['start_outputs'][i], outputs['end_outputs'][i], length)
result_list.append((predict_list, length - 2))
if not test_flag:
if mode == 'span':
label_list = change_label_span(labels['start_labels_ids'][i], labels['end_labels_ids'][i], length)
for label in label_list:
entities_dict[label[2]][1] += 1
for predict in predict_list:
entities_dict[predict[2]][0] += 1
if predict in label_list:
entities_dict[predict[2]][2] += 1
result_dict[domain] = result_list
if not test_flag:
all_result = [0, 0, 0]
for entity in entities_dict:
for i in range(len(entities_dict[entity])):
all_result[i] += entities_dict[entity][i]
logging.info("***** {} *****".format(domain))
p, r, f1 = calculate(all_result)
logging.info("ALL Precision={:.4f}, Recall={:.4f}, F1={:.4f}, predict: {}, truth: {}, right: {}".format(
p, r, f1, all_result[0], all_result[1], all_result[2]))
for tag_type in entities_dict:
if mode == "span":
tag = tags_list[tag_type - 1]
p, r, f1 = calculate(entities_dict[tag_type])
logging.info("{} Precision={:.4f}, Recall={:.4f}, F1={:.4f}, predict: {}, truth: {}, "
"right: {}".format(tag, p, r, f1, entities_dict[tag_type][0],
entities_dict[tag_type][1], entities_dict[tag_type][2]))
for entity in entities_dict:
for i in range(len(entities_dict[entity])):
domain_entities_dict[entity][i] += entities_dict[entity][i]
if not test_flag:
all_result = [0, 0, 0]
for entity in domain_entities_dict:
for i in range(len(domain_entities_dict[entity])):
all_result[i] += domain_entities_dict[entity][i]
logging.info("***** ALL *****")
p, r, f1 = calculate(all_result)
logging.info("ALL Precision={:.4f}, Recall={:.4f}, F1={:.4f}, predict: {}, truth: {}, right: {}".format(
p, r, f1, all_result[0], all_result[1], all_result[2]))
result_f1 = f1
for tag_type in domain_entities_dict:
if mode == "span":
tag = tags_list[tag_type - 1]
p, r, f1 = calculate(domain_entities_dict[tag_type])
logging.info("{} Precision={:.4f}, Recall={:.4f}, F1={:.4f}, predict: {}, truth: {}, "
"right: {}".format(tag, p, r, f1, domain_entities_dict[tag_type][0],
domain_entities_dict[tag_type][1], domain_entities_dict[tag_type][2]))
return result_f1, result_dict
def get_one_domain_predict(dev_dataloader, model, device, title, mode):
if mode == "span":
start_labels_ids_list = []
end_labels_ids_list = []
start_output_list = []
end_output_list = []
mask_ids_list = []
# tqdm provides a progress bar for visualizing iteration progress
for _, data in enumerate(tqdm(dev_dataloader, desc=title)):
token_ids = data[0].to(device, dtype=torch.long)
mask_ids = data[1].to(device, dtype=torch.long)
token_type_ids = data[2].to(device, dtype=torch.long)
if mode == "span":
start_labels_ids = data[3].to(device, dtype=torch.long)
end_labels_ids = data[4].to(device, dtype=torch.long)
output = model(token_ids, mask_ids, token_type_ids)
start_output = output["final_start_output"].argmax(dim=-1)
end_output = output["final_end_output"].argmax(dim=-1)
start_labels_ids_list.append(start_labels_ids)
end_labels_ids_list.append(end_labels_ids)
start_output_list.append(start_output)
end_output_list.append(end_output)
mask_ids_list.append(mask_ids)
if mode == "span":
start_labels_ids = torch.cat(start_labels_ids_list, dim=0)
end_labels_ids = torch.cat(end_labels_ids_list, dim=0)
start_outputs = torch.cat(start_output_list, dim=0)
end_outputs = torch.cat(end_output_list, dim=0)
mask_ids = torch.cat(mask_ids_list, dim=0)
outputs = {}
labels = {}
if mode == "span":
outputs['start_outputs'] = start_outputs
outputs['end_outputs'] = end_outputs
outputs['num'] = start_labels_ids.size()[0]
labels['start_labels_ids'] = start_labels_ids
labels['end_labels_ids'] = end_labels_ids
return outputs, labels, mask_ids
# If a trained model is available, run validation; the span, crf and softmax architectures are supported
def development(model, device, dev_dataloader_dict, tags_list, mode):
# Note the different roles of model.train() and model.eval().
model.eval()
start_time = time.time()
all_data = {}
sentences_num = 0
for domain, dev_dataloader in dev_dataloader_dict.items():
outputs, labels, mask_ids_list = get_one_domain_predict(dev_dataloader, model, device,
"Development-{}".format(domain), mode)
sentences_num += len(dev_dataloader)
all_data[domain] = {
"outputs": outputs,
"labels": labels,
"mask_ids": mask_ids_list
}
f1, predict_dict = evaluate(all_data, tags_list, "Development", mode, False)
end_time = time.time()
logging.info("Development end, speed: {:.1f} sentences/s, all time: {:.2f}s".format(
sentences_num / (end_time - start_time), end_time - start_time))
return f1, predict_dict
# Training procedure
def train(args, model, device, train_datasets, dev_datasets, tags_list, writer):
# Note the different roles of model.train() and model.eval().
model.train()
epoch_step = len(train_datasets) // args.train_batch_size + 1
num_train_optimization_steps = epoch_step * args.epochs
logging.info("***** Running training *****")
logging.info(" Num examples = %d", len(train_datasets))
logging.info(" Batch size = %d", args.train_batch_size)
logging.info(" Num steps = %d", num_train_optimization_steps)
# os.walk traverses a directory tree. For each directory it yields a three-element
# tuple (dirpath, dirnames, filenames): the current path, its sub-directories, and its files.
_, _, files = list(os.walk(args.output_dir))[0]
epoch = 0
for file in files:
if len(file) > 0 and file[:10] == "checkpoint":
temp = file[11:-4]
if temp.isdigit() and int(temp) > epoch:
epoch = int(temp)
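# Example (hypothetical files): with ["checkpoint-3.pkl", "checkpoint-best.pkl"] in the
# output directory, "checkpoint-3.pkl" gives file[11:-4] == "3", so epoch becomes 3, while
# "checkpoint-best.pkl" gives "best", which is not a digit and is therefore skipped.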
# If some epochs have already been trained and checkpointed, load the latest checkpoint directly.
if epoch > 0:
logging.info('checkpoint-' + str(epoch) + '.pkl exists!')
model = torch.load(os.path.join(args.output_dir, 'checkpoint-' + str(epoch) + '.pkl'))
logging.info("Load model:" + os.path.join(args.output_dir, 'checkpoint-' + str(epoch) + '.pkl'))
if epoch >= args.epochs:
logging.info("The model has been trained!")
return
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=args.train_batch_size, shuffle=True)
if args.dev:
dev_dataloader_dict = {}
for domain, dev_dataset in dev_datasets.items():
dev_dataloader = torch.utils.data.DataLoader(dev_dataset["dev_dataset"], batch_size=args.train_batch_size, shuffle=False)
dev_dataloader_dict[domain] = dev_dataloader
best_f1 = -1
# If a best model has already been saved, load it directly!
if os.path.exists(os.path.join(args.output_dir, 'checkpoint-best.pkl')):
logging.info('checkpoint-best.pkl exists!')
model = torch.load(os.path.join(args.output_dir, 'checkpoint-best.pkl'))
best_f1, _ = development(model, device, dev_dataloader_dict, tags_list, args.architecture)
logging.info("Load best F1={:.4f}".format(best_f1))
if args.architecture == "span" or args.architecture == "softmax":
optimizer = transformers.AdamW(params=model.parameters(), lr=args.learning_rate)
if args.architecture == "crf":
optimizer = transformers.AdamW(
params=[
{'params': model.bert.parameters()},
{'params': model.dence.parameters(), 'lr': args.crf_lr},
{'params': model.crf.parameters(), 'lr': args.crf_lr}
],
lr=args.learning_rate)
# Learning-rate warmup: the learning rate grows linearly during warmup, then decays linearly/exponentially according to the schedule
lr_scheduler = transformers.get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=int(num_train_optimization_steps) * args.warmup_proportion,
num_training_steps=num_train_optimization_steps
)
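# For example (hypothetical numbers): with num_train_optimization_steps = 1000 and
# warmup_proportion = 0.1, the learning rate rises linearly from 0 to args.learning_rate
# over the first 100 steps, then decays linearly back towards 0 over the remaining 900 steps.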
# Start training
for current_epoch in range(epoch, args.epochs):
model.train()
all_loss = 0
start_time = time.time()
all_step = 0
for step, data in enumerate(tqdm(train_dataloader, desc="Training")):
token_ids = data[0].to(device, dtype=torch.long)
mask_ids = data[1].to(device, dtype=torch.long)
token_type_ids = data[2].to(device, dtype=torch.long)
if args.architecture == "span":
start_labels_ids = data[3].to(device, dtype=torch.long)
end_labels_ids = data[4].to(device, dtype=torch.long)
domain_labels_ids = data[5].to(device, dtype=torch.long)
label = {
"start_labels_ids": start_labels_ids,
"end_labels_ids": end_labels_ids,
"domain_labels_ids": domain_labels_ids,
}
output = model(token_ids, mask_ids, token_type_ids)
loss = model.loss(output, label, mask_ids)
if writer:
writer.add_scalar('loss', loss, global_step=current_epoch * epoch_step + step + 1)
writer.add_scalar('learning_rate',
optimizer.state_dict()['param_groups'][0]['lr'],
global_step=current_epoch * epoch_step + step + 1)
all_loss += loss.item()
optimizer.zero_grad()  # Zero out the gradients, i.e. reset the derivatives of the loss w.r.t. the weights to 0
# Why optimizer.step() must stay inside the mini-batch loop, and the difference between
# optimizer.step() and loss.backward(): https://blog.csdn.net/xiaoxifei/article/details/87797935
loss.backward()
optimizer.step()
lr_scheduler.step()
all_step += 1
# In PyTorch, a state_dict is simply a Python dict mapping each layer to its parameters.
# Optimizer objects in torch.optim also expose a state_dict, which contains 'state' and
# 'param_groups' entries; each param_group is itself a dict of learning rate, momentum, etc.
lr = optimizer.state_dict()['param_groups'][0]['lr']
end_time = time.time()
logging.info("Epoch: {}, Loss: {:.3g}, learning rate: {:.3g}, Time: {:.2f}s".format(
current_epoch + 1, all_loss / all_step, lr, end_time - start_time))
torch.save(model, os.path.join(args.output_dir, 'checkpoint-' + str(current_epoch + 1) + '.pkl'))
delet_checkpoints_name = os.path.join(args.output_dir, 'checkpoint-' + str(
current_epoch + 1 - args.keep_last_n_checkpoints) + '.pkl')
if os.path.exists(delet_checkpoints_name):
os.remove(delet_checkpoints_name)
if args.dev:
f1, _ = development(model, device, dev_dataloader_dict, tags_list, args.architecture)
if f1 == -1 or f1 > best_f1:
best_f1 = f1
logging.info("Best F1={:.4f}, save model!".format(best_f1))
torch.save(model, os.path.join(args.output_dir, 'checkpoint-best.pkl'))
if writer:
writer.add_scalar('dev_f1', f1, global_step=current_epoch * epoch_step)
writer.add_scalar('dev_best_f1', best_f1, global_step=current_epoch * epoch_step)
torch.save(model, os.path.join(args.output_dir, 'checkpoint-last.pkl'))
if args.dev:
f1, _ = development(model, device, dev_dataloader_dict, tags_list, args.architecture)
if f1 == -1 or f1 > best_f1:
best_f1 = f1
logging.info("Best F1={:.4f}, save model!".format(best_f1))
torch.save(model, os.path.join(args.output_dir, 'checkpoint-best.pkl'))
logging.info("Training end!")
def write_one_domain_to_file(file_path, sentences, predict_list, tags_list, mode):
assert len(sentences) == len(predict_list)
write_data_list = []
for i in range(len(sentences)):
sentence, label = sentences[i]
predict, _ = predict_list[i]
result = ['O'] * len(sentence)
for entity in predict:
if mode == 'span':
tag = tags_list[entity[2] - 1]
elif mode == 'softmax' or mode == 'crf':
tag = entity[2]
result[entity[0] - 1] = "B-" + tag
for j in range(entity[0], entity[1] - 1):
result[j] = "I-" + tag
write_data = []
for j in range(len(sentence)):
write_data.append((sentence[j], label[j], result[j]))
write_data_list.append(write_data)
with open(file_path, "w", encoding="utf8") as fout:
for sentence in write_data_list:
for data in sentence:
fout.write(data[0] + '\t' + data[1] + '\t' + data[2] + '\n')
fout.write('\n')
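# Example (hypothetical): with tags_list = ["PER", "LOC"], a predicted span (1, 3, 1) over a
# four-token sentence is written out as the BIO tags ["B-PER", "I-PER", "O", "O"]; the -1 shift
# maps the model's token positions back onto the raw sentence, presumably compensating for the
# leading special token ([CLS]) in the model inputs.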
# Validation procedure: load the model directly
def dev(args, datasets, model, device, tags_list):
# Load the model; exit with an error if no model file exists
if args.model is not None:
model = torch.load(args.model)
logging.info("Load model:" + args.model)
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best.pkl')):
model = torch.load(os.path.join(args.output_dir, 'checkpoint-best.pkl'))
logging.info("Load model:" + os.path.join(args.output_dir, 'checkpoint-best.pkl'))
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-last.pkl')):
model = torch.load(os.path.join(args.output_dir, 'checkpoint-last.pkl'))
logging.info("Load model:" + os.path.join(args.output_dir, 'checkpoint-last.pkl'))
else:
logging.info("Error! The model file does not exist!")
exit(1)
model.eval()
dataloader_dict = {}
for domain, dev_dataset in datasets.items():
dataloader = torch.utils.data.DataLoader(dev_dataset["dev_dataset"], batch_size=args.train_batch_size,
shuffle=False)
dataloader_dict[domain] = dataloader
_, predict_dict = development(model, device, dataloader_dict, tags_list, args.architecture)
dev_dir = os.path.join(args.output_dir, "development")
if not os.path.exists(dev_dir):
os.makedirs(dev_dir)
for domain in datasets:
sentences = datasets[domain]['dev_data']
predict_list = predict_dict[domain]
assert len(sentences) == len(predict_list)
write_data_list = []
for i in range(len(sentences)):
sentence, label = sentences[i]
predict, _ = predict_list[i]
result = ['O'] * len(sentence)
for entity in predict:
if args.architecture == 'span':
tag = tags_list[entity[2] - 1]
elif args.architecture == 'softmax' or
# Policy_Gradient_with_Continuous_action.py
###############################################################################
# For more info, see https://hoseinkh.github.io/
###############################################################################
import gym
import os
import sys
import numpy as np
"""
# if using tensorflow v1:
import tensorflow as tf
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import matplotlib
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from gym import wrappers
from datetime import datetime
###############################################################################
# Feature transformer uses RBF kernels to transform the original state space to ...
# ... higher dimensions. This helps with the performance of the model!
class FeatureTransformer:
def __init__(self, env, n_components=500):
# generate states (observations)
observation_examples = np.array([env.observation_space.sample() for x in range(10000)])
# define scaler and scale the states (observations) --> mean 0 and variance 1
scaler = StandardScaler()
scaler.fit(observation_examples)
#
# Now we basically use RBF to for feature generation
# Each RBFSampler takes each (original) (feature representation) of ...
# ... a state and converts it to "n_components" new features.
# Hence, after concatenating the new features, we convert each state to ...
# ... {(# RBF samplers) * n_components} new features.
#
# We use RBF kernels with different variances to cover different parts ...
# ... of the space.
#
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=0.5, n_components=n_components))
])
# For all the generated samples, transform original state representations ...
# ... to a new state representation using "featurizer"
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
#
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
######################################
def transform(self, observations):
#
scaled_original_state_representation = self.scaler.transform(observations)
#
scaled_higher_dimensions_state_representation = self.featurizer.transform(scaled_original_state_representation)
return scaled_higher_dimensions_state_representation
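# A minimal usage sketch (assuming a gym environment such as "MountainCarContinuous-v0"):
#   env = gym.make("MountainCarContinuous-v0")
#   ft = FeatureTransformer(env, n_components=500)
#   s = env.reset()
#   x = ft.transform([s])   # shape (1, 4 * 500): the four RBF samplers concatenated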
###############################################################################
# It is better to define everything directly. This allows tensorflow to ...
# ... automatically calculate the cost functions, and hence we get rid of ...
# ... the issue of manually feeding it to the tensorflow.
# To do this TensorFlow needs to remember what operations happen in what ...
# ... order during the forward pass. Then, during the backward pass, ...
# ... TensorFlow traverses this list of operations in reverse order to ...
# ... compute gradients.
class HiddenLayer:
def __init__(self, inp_size_of_hidden_layer, out_size_of_hidden_layer, f=tf.nn.tanh, use_bias=True, zeros=False):
if zeros:
W = np.zeros((inp_size_of_hidden_layer, out_size_of_hidden_layer), dtype=np.float32)
else:
W = tf.random_normal(shape=(inp_size_of_hidden_layer, out_size_of_hidden_layer)) * np.sqrt(2. / inp_size_of_hidden_layer, dtype=np.float32)
self.W = tf.Variable(W)
#
self.use_bias = use_bias
if use_bias:
self.b = tf.Variable(np.zeros(out_size_of_hidden_layer).astype(np.float32))
#
self.f = f
######################################
def forward(self, X):
if self.use_bias:
a = tf.matmul(X, self.W) + self.b
else:
a = tf.matmul(X, self.W)
return self.f(a)
###############################################################################
# approximates pi(a | s)
# here we use two NNs. One for predicting the mean of the action, and one to ...
# ... predict the std of the action. However, the two NNs have the same body, ...
# ... and only the last layer differs!
class PolicyModel:
def __init__(self, data_input_size, feature_transformer, hidden_layer_sizes=[]):
self.feature_transformer = feature_transformer
#
##### hidden layers #####
NN_input_size = data_input_size
self.hidden_layers = []
for NN_output_size in hidden_layer_sizes:
layer = HiddenLayer(NN_input_size, NN_output_size)
self.hidden_layers.append(layer)
NN_input_size = NN_output_size
#
## final layer for the mean (we use linear for the activation function)
self.mean_layer = HiddenLayer(NN_input_size, 1, lambda x: x, use_bias=False, zeros=True)
#
## final layer for the variance (we use softplus for the activation function to ensure positive std)
self.stdv_layer = HiddenLayer(NN_input_size, 1, tf.nn.softplus, use_bias=False, zeros=False)
#
### inputs and targets (used in the session)
## self.X is the feature representation of the state (after applying self.feature_transformer)
self.X = tf.placeholder(tf.float32, shape=(None, data_input_size), name='X')
self.actions = tf.placeholder(tf.float32, shape=(None,), name='actions')
## self.advantages is the G - V(S), which uses V(S) as a Baseline to ...
## ... decrease variance of the model!
self.advantages = tf.placeholder(tf.float32, shape=(None,), name='advantages')
#
### get final hidden layer
out_of_curr_layer = self.X
for layer in self.hidden_layers:
out_of_curr_layer = layer.forward(out_of_curr_layer)
#
### calculate output and cost
## calculate the mean of the Gaussian distribution for the action
mean = self.mean_layer.forward(out_of_curr_layer)
## calculate the std of the Gaussian distribution for the action
stdv = self.stdv_layer.forward(out_of_curr_layer) + 1e-5 # we do smoothing by adding small amount to the std
#
### make mean and std 1-D
mean = tf.reshape(mean, [-1])
stdv = tf.reshape(stdv, [-1])
#
### Build the normal distribution of the action
norm = tf.distributions.Normal(mean, stdv)
## note that the actions in the environment are between -1 and 1
self.predict_op = tf.clip_by_value(norm.sample(), -1, 1)
#
log_probs = norm.log_prob(self.actions)
## note that here we add a regularization term (i.e. 0.1*norm.entropy()) to the cost function ...
## ... to avoid overfitting!
cost = -tf.reduce_sum(self.advantages * log_probs + 0.1*norm.entropy())
self.train_op = tf.train.AdamOptimizer(1e-3).minimize(cost)
######################################
def set_session(self, session):
self.session = session
######################################
def partial_fit(self, X, actions, advantages):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
#
actions = np.atleast_1d(actions)
advantages = np.atleast_1d(advantages)
self.session.run(
self.train_op,
feed_dict={
self.X: X,
self.actions: actions,
self.advantages: advantages,
}
)
######################################
def predict(self, X):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
######################################
def sample_action(self, X):
p = self.predict(X)[0]
return p
###############################################################################
# approximates V(s)
# we use this function to calculate state-value function V(s) ...
# ... which is used as Baseline in the policy gradient, which ...
# ... helps decreasing the variance of the model!
class ValueModel:
def __init__(self, data_input_size, feature_transformer, hidden_layer_sizes=[]):
self.feature_transformer = feature_transformer
self.costs = []
#
# create the neural network for the state-value approximation (i.e. V(S))
self.layers = []
NN_input_size = data_input_size
for NN_output_size in hidden_layer_sizes:
layer = HiddenLayer(NN_input_size, NN_output_size)
self.layers.append(layer)
NN_input_size = NN_output_size
#
## final layer. Since we are predicting the value function, we only have one node, and ...
## ... the linear function is used as the activation function in the output layer
layer = HiddenLayer(NN_input_size, 1, lambda x: x)
self.layers.append(layer)
#
### inputs and targets
## self.X is the (feature-transformed) feature representation of the state
self.X = tf.placeholder(tf.float32, shape=(None, data_input_size), name='X')
## self.Y is the observed value for the state S.
self.Y = tf.placeholder(tf.float32, shape=(None,), name='Y')
#
### calculate output and cost
out_of_curr_layer = self.X # = feature representation of the state
for layer in self.layers:
out_of_curr_layer = layer.forward(out_of_curr_layer)
Y_hat = tf.reshape(out_of_curr_layer, [-1]) # the output of the NN (estimated V(s))
self.predict_op = Y_hat
#
### we use the squared error as the error function!
cost = tf.reduce_sum(tf.square(self.Y - Y_hat))
self.cost = cost
self.train_op = tf.train.AdamOptimizer(1e-1).minimize(cost)
######################################
def set_session(self, session):
self.session = session
######################################
def partial_fit(self, X, Y):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
Y = np.atleast_1d(Y)
self.session.run(self.train_op, feed_dict={self.X: X, self.Y: Y})
cost = self.session.run(self.cost, feed_dict={self.X: X, self.Y: Y})
self.costs.append(cost)
######################################
def predict(self, X):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
###############################################################################
def play_one_td(env, policy_model, value_model, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
#
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = policy_model.sample_action(observation)
prev_observation = observation
observation, reward, done, info = env.step([action])
#
totalreward += reward
#
# update the models
V_next = value_model.predict(observation)
G = reward + gamma*V_next
advantage = G - value_model.predict(prev_observation)
policy_model.partial_fit(prev_observation, action, advantage)
value_model.partial_fit(prev_observation, G)
#
iters += 1
#
return totalreward, iters
###############################################################################
# we evaluate the performance of the model at each episode t by ...
# ... taking the running average of the total rewards over the preceding 100 episodes (up to and including t).
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.xlabel("Iterations")
plt.ylabel("Average Time")
# plt.show()
curr_path = os.path.abspath(os.getcwd())
plt.savefig(curr_path + '/figs/reward_running_avg_MountainCarContinuous.png')
plt.close()
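# A minimal sketch of how these pieces are typically wired together (the episode count and
# gamma value are illustrative assumptions):
#   env = gym.make("MountainCarContinuous-v0")
#   ft = FeatureTransformer(env)
#   policy_model = PolicyModel(ft.dimensions, ft, hidden_layer_sizes=[])
#   value_model = ValueModel(ft.dimensions, ft, hidden_layer_sizes=[10])
#   session = tf.InteractiveSession()
#   session.run(tf.global_variables_initializer())
#   policy_model.set_session(session)
#   value_model.set_session(session)
#   totalrewards = np.empty(50)
#   for n in range(50):
#       totalrewards[n], _ = play_one_td(env, policy_model, value_model, gamma=0.95)
#   plot_running_avg(totalrewards)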
###############################################################################
# here we plot the negative of the optimal state value functions (i.e., -V*(s))!
# Note that the optimal action values are equal to the negative of the average optimal time ...
# ... that it takes to reach the mountain.
# Hence this plot shows the average optimal time to reach the top of the mountain at each state.
def plot_avg_num_remaining_steps(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
# both X and Y will be of shape (num_tiles, num_tiles)
Z = np.apply_along_axis(lambda _: -1*np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
# Z will also be of shape (num_tiles, num_tiles)
#
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y,
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2808
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lusid.api_client import ApiClient
from lusid.exceptions import (
ApiTypeError,
ApiValueError
)
class LegalEntitiesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_legal_entity(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] Delete Legal Entity # noqa: E501
Delete a legal entity. Deletion will be valid from the legal entity's creation datetime. This means that the legal entity will no longer exist at any effective datetime from the asAt datetime of deletion. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_legal_entity(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id_type_scope: The scope of the legal entity identifier type. (required)
:param str id_type_code: The code of the legal entity identifier type. (required)
:param str code: Code of the legal entity under specified identifier type's scope and code. This together with defined identifier type uniquely identifies the legal entity to delete. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: DeletedEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_legal_entity_with_http_info(id_type_scope, id_type_code, code, **kwargs) # noqa: E501
def delete_legal_entity_with_http_info(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] Delete Legal Entity # noqa: E501
Delete a legal entity. Deletion will be valid from the legal entity's creation datetime. This means that the legal entity will no longer exist at any effective datetime from the asAt datetime of deletion. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_legal_entity_with_http_info(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id_type_scope: The scope of the legal entity identifier type. (required)
:param str id_type_code: The code of the legal entity identifier type. (required)
:param str code: Code of the legal entity under specified identifier type's scope and code. This together with defined identifier type uniquely identifies the legal entity to delete. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(DeletedEntityResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id_type_scope', 'id_type_code', 'code'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_legal_entity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id_type_scope' is set
if ('id_type_scope' not in local_var_params or
local_var_params['id_type_scope'] is None):
raise ApiValueError("Missing the required parameter `id_type_scope` when calling `delete_legal_entity`") # noqa: E501
# verify the required parameter 'id_type_code' is set
if ('id_type_code' not in local_var_params or
local_var_params['id_type_code'] is None):
raise ApiValueError("Missing the required parameter `id_type_code` when calling `delete_legal_entity`") # noqa: E501
# verify the required parameter 'code' is set
if ('code' not in local_var_params or
local_var_params['code'] is None):
raise ApiValueError("Missing the required parameter `code` when calling `delete_legal_entity`") # noqa: E501
if ('id_type_scope' in local_var_params and
len(local_var_params['id_type_scope']) > 64):
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `delete_legal_entity`, length must be less than or equal to `64`") # noqa: E501
if ('id_type_scope' in local_var_params and
len(local_var_params['id_type_scope']) < 1):
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `delete_legal_entity`, length must be greater than or equal to `1`") # noqa: E501
if 'id_type_scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['id_type_scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `delete_legal_entity`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if ('id_type_code' in local_var_params and
len(local_var_params['id_type_code']) > 64):
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `delete_legal_entity`, length must be less than or equal to `64`") # noqa: E501
if ('id_type_code' in local_var_params and
len(local_var_params['id_type_code']) < 1):
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `delete_legal_entity`, length must be greater than or equal to `1`") # noqa: E501
if 'id_type_code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['id_type_code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `delete_legal_entity`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if ('code' in local_var_params and
len(local_var_params['code']) > 64):
raise ApiValueError("Invalid value for parameter `code` when calling `delete_legal_entity`, length must be less than or equal to `64`") # noqa: E501
if ('code' in local_var_params and
len(local_var_params['code']) < 1):
raise ApiValueError("Invalid value for parameter `code` when calling `delete_legal_entity`, length must be greater than or equal to `1`") # noqa: E501
if 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_legal_entity`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id_type_scope' in local_var_params:
path_params['idTypeScope'] = local_var_params['id_type_scope'] # noqa: E501
if 'id_type_code' in local_var_params:
path_params['idTypeCode'] = local_var_params['id_type_code'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.2808'
return self.api_client.call_api(
'/api/legalentities/{idTypeScope}/{idTypeCode}/{code}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeletedEntityResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_legal_entity(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] Get Legal Entity # noqa: E501
Retrieve the definition of a legal entity. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_legal_entity(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id_type_scope: Scope of the legal entity identifier type. (required)
:param str id_type_code: Code of the legal entity identifier type. (required)
:param str code: Code of the legal entity under specified identifier type's scope and code. This together with stated identifier type uniquely identifies the legal entity. (required)
:param list[str] property_keys: A list of property keys or identifier types (as property keys) from the \"LegalEntity\" domain to include for found legal entity. These take the format {domain}/{scope}/{code} e.g. \"LegalEntity/ContactDetails/Address\".
:param str effective_at: The effective datetime or cut label at which to retrieve the legal entity. Defaults to the current LUSID system datetime if not specified.
:param datetime as_at: The asAt datetime at which to retrieve the legal entity. Defaults to return the latest version of the legal entity if not specified.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: LegalEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_legal_entity_with_http_info(id_type_scope, id_type_code, code, **kwargs) # noqa: E501
def get_legal_entity_with_http_info(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] Get Legal Entity # noqa: | |
# -*- coding: utf-8 -*-
"""Redcarpet_up.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/gist/amankumar444/89d0f234d0622ab938acb10cc858b5ba/redcarpet_up.ipynb
"""
# !pip install nsepy
import pandas as pd
import pandas_datareader as dr
import matplotlib.pyplot as plt
import numpy as np
import warnings
from scipy import stats
from nsepy import get_history
from datetime import date
warnings.filterwarnings("ignore")
# %matplotlib inline
"""## Adding Data Sets to the Notebook"""
#Getting Stock price data from yahoo finance using panda_datareader library
#Getting NIFTYIT Index value using nsepy library
TCS = dr.data.get_data_yahoo('TCS.NS', start = '2015-01-01', end ='2016-01-01')
INFY = dr.data.get_data_yahoo('INFY.NS', start = '2015-01-01', end ='2016-01-01')
niftyit = get_history(symbol="NIFTYIT",
start=date(2015,1,1),
end=date(2016,1,1),
index=True)
"""# Time Series plot
## Plots using Matlab Library
### Plots for TCS, Infosys and NIFTY IT index
"""
#Code to plot TCS, INFOSYS, NIFTY IT data set using plt(Matplot library)
plt.plot(TCS['Close'])
plt.show()
plt.plot(INFY['Close'])
plt.show()
plt.plot(niftyit['Close'])
plt.show()
"""# Solution of Questions Mentioned in Problem Statement
### 1. Create 4,16,....,52 week moving average(closing price) for each stock and index
"""
# For question 1: compute the moving average for 4 weeks (20 trading days),
# 16 weeks (20+60 days), ..., up to 52 weeks (20+60+60+60+60-13 days)
def moving_avg(data):
for i in range(20,240,60):
data['Rolling_Mean'] = data['Close'].rolling(window = i).mean()
plt.plot(data['Rolling_Mean'], label = i)
plt.legend()
plt.show()
moving_avg(INFY)
moving_avg(TCS)
moving_avg(niftyit)
"""## 2. Create rolling window of size 10 on each stock/index."""
#Answer to the 2nd Question, where rolling window is incremented with 10 steps
#For all the stocks and index dataset.
def time_s(data):
for i in range(10,80,10):
data['Rolling_Mean'] = data['Close'].rolling(window = i).mean()
plt.plot(data['Rolling_Mean'], label = i)
plt.legend()
plt.show()
time_s(INFY)
time_s(TCS)
time_s(niftyit)
"""## 3. Create the following dummy time series"""
'''Function to generate extra features like Volume Shocks, Price Shocks, Directions and
Pricing shock without volume shock.
Volume Shock = 'Vol_Shock'
Price Shock = 'Price_Shock'
Directions for Vol_Shock = 'direc_Vol_Shock'
Directions for Price_Shock = 'direc_Price_Shock'
Pricing shock without volume shock = 'PSWVS'
'''
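# Example (hypothetical values): if yesterday's volume was 1,000,000 and today's is 1,150,000,
# the change is +15%, which exceeds the 10% threshold, so Vol_Shock = 1 and direc_Vol_Shock = 1.
# If the close moves from 100 to 101 (+1%, below the 2% threshold), Price_Shock stays 0,
# and therefore PSWVS (a price shock without a volume shock) is 0 as well.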
def dummy_ts(data):
Vol1 = data.Volume
Close = data.Close
data['Vol_Shock'] = np.zeros(len(data), dtype=float)
data['Price_Shock'] = np.zeros(len(data), dtype=float)
data['direc_Vol_Shock'] = np.zeros(len(data), dtype=float)
data['direc_Price_Shock'] = np.zeros(len(data), dtype=float)
data['PSWVS'] = np.zeros(len(data), dtype=float)
for i in range(1,len(data)):
k = ((Vol1[i]-Vol1[i-1])/Vol1[i-1])*100
if(abs(k)>=10):
data['Vol_Shock'][i] = 1
if(k >0):
data['direc_Vol_Shock'][i] = 1
for j in range(1,len(data)):
l = ((Close[j]-Close[j-1])/Close[j-1])*100
if(abs(l)>=2):
data['Price_Shock'][j] = 1
if(l >0):
data['direc_Price_Shock'][j] = 1
for m in range(1,len(data)):
if(data['Price_Shock'][m] == 1 and data['Vol_Shock'][m] == 0):
data['PSWVS'][m] = 1
dummy_ts(TCS)
dummy_ts(INFY)
dummy_ts(niftyit)
''' Feature extraction code for Volume Shock == 1,
New DataFrame named N_TCS, N_INFY, N_NIFTYIT is selected, which follows above condition.
And 2nd DataFrame named Ne_TCS, Ne_INFY, Ne_NIFTYIT is also selected:
Condition for 2nd DataFrame is PSWVS == 1.
'''
N_TCS = TCS[TCS["Vol_Shock"] == 1]
N_INFY = INFY[INFY["Vol_Shock"] == 1]
N_NIFTYIT = niftyit[niftyit["Vol_Shock"] == 1]
Ne_TCS = TCS[TCS["PSWVS"] == 1]
Ne_INFY = INFY[INFY["PSWVS"] == 1]
Ne_NIFTYIT = niftyit[niftyit["PSWVS"] == 1]
"""# 2nd Part of Question Set
## Create timeseries plot of close prices of stocks/indices with the following features:
1.Color timeseries in simple blue color
2.Color timeseries between two volume shocks in a different color (Red)
3.Mark closing Pricing shock without volume shock to identify volumeless price movement.
"""
#Plot function
def plot_(data,data1,data2):
#fi,axes = plt.subplots(2,2, figsize=(16,10))
plt.plot(data['Close'],color = 'blue');
plt.show()
plt.plot(data1['Close'], color = 'red');
plt.show()
plt.plot(data2['Close'], color = 'black');
plt.show()
plot_(TCS,N_TCS,Ne_TCS)
plot_(INFY,N_INFY,Ne_INFY)
plot_(niftyit,N_NIFTYIT,Ne_NIFTYIT)
"""4.Hand craft partial autocorrelation plot for each stock/index on upto all lookbacks on bokeh"""
#Partial Auto-correlation Plotting for TCS, Infosys stocks and NIFTYIT index
from statsmodels.graphics.tsaplots import plot_pacf
def plot_partial_correlation(data):
plot_pacf(data['Close'], lags=247)
plt.show()
plot_partial_correlation(TCS)
plot_partial_correlation(INFY)
plot_partial_correlation(niftyit)
"""# Regression Models
### Feature Engineering:
Training and Testing Data set is Prepared for TCS, INFY(INFOSYS) and NIFTYIT
Training and Testing Data sets with Lag by 1 shift and 2 shift, is prepared for Proper Regression Analysis
"""
# Application of regression models: I have decided to use the pandas library and shift the data by 1 and 2 units.
# The approach is that the closing price is regressed on its own lagged values, so only closing prices are selected for the training data.
Y_train_tcs = TCS.Close
Y_train_infy = INFY.Close
Y_train_niftyit = niftyit.Close
#Training Data set are shifted by 1 and 2 units, for all 3 data sets
# Thus, total data set now is 2 for all three cases
X_train_tcs1 = Y_train_tcs.shift(periods = 1)
X_train_tcs2 = Y_train_tcs.shift(periods = 2)
X_train_infy1 = Y_train_infy.shift(periods = 1)
X_train_infy2 = Y_train_infy.shift(periods = 2)
X_train_niftyit1 = Y_train_niftyit.shift(periods = 1)
X_train_niftyit2 = Y_train_niftyit.shift(periods = 2)
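# For example, if Close = [c0, c1, c2, ...], then shift(periods=1) gives [NaN, c0, c1, ...]
# and shift(periods=2) gives [NaN, NaN, c0, ...]; each day's closing price is therefore
# regressed on the closing price one or two trading days earlier.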
#dropping NAN values from the training data_Set, which are 1 or 2 order shifted
X_train_tcs1 = X_train_tcs1.dropna()
X_train_tcs2 = X_train_tcs2.dropna()
X_train_infy1 = X_train_infy1.dropna()
X_train_infy2 = X_train_infy2.dropna()
X_train_niftyit1 = X_train_niftyit1.dropna()
X_train_niftyit2 = X_train_niftyit2.dropna()
# Move the Date index back into a column and then drop the 'Date' column, for all three stocks/index
#For Y data set
Y_train_tcs = Y_train_tcs.reset_index()
Y_train_tcs = Y_train_tcs.drop(columns = ['Date'])
Y_train_infy = Y_train_infy.reset_index()
Y_train_infy = Y_train_infy.drop(columns = ['Date'])
Y_train_niftyit = Y_train_niftyit.reset_index()
Y_train_niftyit = Y_train_niftyit.drop(columns = ['Date'])
#traing dataset for condition 1 and 2, here one has shift =1 and 2 has shift = 2
#Removal of date from index and Column named data from all three stocks/index for training data set
#For X data set
X_train_tcs1 = X_train_tcs1.reset_index()
X_train_tcs1 = X_train_tcs1.drop(columns = ['Date'])
X_train_tcs2 = X_train_tcs2.reset_index()
X_train_tcs2 = X_train_tcs2.drop(columns = ['Date'])
X_train_infy1 = X_train_infy1.reset_index()
X_train_infy1 = X_train_infy1.drop(columns = ['Date'])
X_train_infy2 = X_train_infy2.reset_index()
X_train_infy2 = X_train_infy2.drop(columns = ['Date'])
X_train_niftyit1 = X_train_niftyit1.reset_index()
X_train_niftyit1 = X_train_niftyit1.drop(columns = ['Date'])
X_train_niftyit2 = X_train_niftyit2.reset_index()
X_train_niftyit2 = X_train_niftyit2.drop(columns = ['Date'])
#Removal of first and second row from 'Y' dataset, for application of Regression models
Y_train_tcs1 =Y_train_tcs.iloc[1:]
Y_train_tcs2 = Y_train_tcs1.iloc[1:]
Y_train_infy1 = Y_train_infy.iloc[1:]
Y_train_infy2 = Y_train_infy1.iloc[1:]
Y_train_niftyit1 = Y_train_niftyit.iloc[1:]
Y_train_niftyit2 = Y_train_niftyit1.iloc[1:]
#Building of training and testing data set, with last 50 'Closing Price Value' in test
#And first 196, 195, 198 or 197 data sets for 'Y' series
Y_tcs_test1 = Y_train_tcs1.tail(50)
Y_tcs_train1 = Y_train_tcs1[:-50]
Y_infy_test1 = Y_train_infy1.tail(50)
Y_infy_train1 = Y_train_infy1[:-50]
Y_niftyit_test1 = Y_train_niftyit1.tail(50)
Y_niftyit_train1 = Y_train_niftyit1[:-50]
Y_tcs_test2 = Y_train_tcs2.tail(50)
Y_tcs_train2 = Y_train_tcs2[:-50]
Y_infy_test2 = Y_train_infy2.tail(50)
Y_infy_train2 = Y_train_infy2[:-50]
Y_niftyit_test2 = Y_train_niftyit2.tail(50)
Y_niftyit_train2 = Y_train_niftyit2[:-50]
X_tcs_train1_2 = X_train_tcs1.iloc[1:]
X_infy_train1_2 = X_train_infy1.iloc[1:]
X_niftyit_train1_2 = X_train_niftyit1.iloc[1:]
#Building of training and testing data set, with last 50 'Closing Price Value' in test
#And first 196, 195, 198 or 197 data sets for 'X' series
X_tcs_test1 = X_train_tcs1.tail(50)
X_tcs_train1 = X_train_tcs1[:-50]
X_tcs_test2 = X_train_tcs2.tail(50)
X_tcs_train2 = X_train_tcs2[:-50]
X_infy_test1 = X_train_infy1.tail(50)
X_infy_train1 = X_train_infy1[:-50]
X_infy_test2 = X_train_infy2.tail(50)
X_infy_train2 = X_train_infy2[:-50]
X_niftyit_test1 = X_train_niftyit1.tail(50)
X_niftyit_train1 = X_train_niftyit1[:-50]
X_niftyit_test2 = X_train_niftyit2.tail(50)
X_niftyit_train2 = X_train_niftyit2[:-50]
X_tcs_test1_2 = X_tcs_train1_2.tail(50)
X_tcs_train1_2 = X_tcs_train1_2[:-50]
X_infy_test1_2 = X_infy_train1_2.tail(50)
X_infy_train1_2 = X_infy_train1_2[:-50]
X_niftyit_test1_2 = X_niftyit_train1_2.tail(50)
X_niftyit_train1_2 = X_niftyit_train1_2[:-50]
"""### Linear Regression Model with Graph"""
#Application of Linear Regression Model on training and test data sets for 'X' and 'Y'
from sklearn import linear_model
def regression_Model(data_train_x,data_train_y,data_test_x,data_test_y):
reg = linear_model.LinearRegression()
reg.fit(data_train_x,data_train_y)
print(reg.score(data_test_x,data_test_y))
plt.plot(reg.predict(data_test_x), color = 'red')
plt.plot(data_test_y, color = 'blue')
plt.show()
regression_Model(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
regression_Model(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
regression_Model(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
regression_Model(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
regression_Model(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
regression_Model(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""### Checking the Closing Price Datasets With SVM Model
The obtained R^2 is negative, so the SVM model is not appropriate in this situation
"""
from sklearn.svm import SVR
from sklearn.metrics import r2_score
def SVM_reg(data_train_x,data_train_y,data_test_x,data_test_y):
svr_poly = SVR()
svr_poly.fit(data_train_x,data_train_y)
y_predict = svr_poly.predict(data_test_x)
print(r2_score(data_test_y, y_predict))
#print y_ploy
SVM_reg(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
"""### Lasso Regression Model (alpha = .0001) with Predicted-y Graph"""
#Application of Lasso Regression Model on training and test data sets for 'X' and 'Y'
#from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score
def lsso_reg(data_train_x,data_train_y,data_test_x,data_test_y):
reg = linear_model.LassoLars(alpha=0.0001)
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
print(reg.score(data_test_x,data_test_y))
print(r2_score(data_test_y, y_pred))
#error = y_pred-data_test_y
plt.plot(reg.predict(data_test_x), color = 'red')
plt.plot(data_test_y, color = 'blue')
plt.show()
#return (error)
Error_tcs_1 = lsso_reg(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
Error_tcs_2 = lsso_reg(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
Error_infy_1 = lsso_reg(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
Error_infy_2 = lsso_reg(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
Error_niftyit_1 = lsso_reg(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
Error_niftyit_2 = lsso_reg(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""# Checking for assumptions
### 1. Lasso Regression:
"""
'''Prove that the model does not violate its assumptions'''
import math
from sklearn.metrics import mean_squared_error
def lsso_reg_check(data_train_x,data_train_y,data_test_x,data_test_y):
#error
reg = linear_model.LassoLars(alpha=0.0001)
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
rms = np.sqrt(mean_squared_error(data_test_y, y_pred))
print("RMS error values = ", rms)
lsso_reg_check(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
lsso_reg_check(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
lsso_reg_check(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
lsso_reg_check(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
lsso_reg_check(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
lsso_reg_check(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""### 2. Linear Regression Model:"""
def regression_Model_check(data_train_x,data_train_y,data_test_x,data_test_y):
reg = linear_model.LinearRegression()
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
rms = np.sqrt(mean_squared_error(data_test_y, y_pred))
print("RMS error values = ", rms)
regression_Model_check(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
regression_Model_check(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
regression_Model_check(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
regression_Model_check(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
regression_Model_check(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
regression_Model_check(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""The root-mean-square error for each case is within an acceptable range.
# Checking for Normality Distributions for different Models
### 1. Histogram, variance and Jarque_Bera test of Residuals for lasso regression model
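As background for the test used below: the Jarque-Bera statistic is JB = (n/6) * (S^2 + (K - 3)^2 / 4),
where S is the sample skewness and K the sample kurtosis. For normally distributed residuals JB is
approximately chi-squared with 2 degrees of freedom, so a large JB (small p-value) rejects normality.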
"""
def histo(data_train_x,data_train_y,data_test_x,data_test_y):
reg = linear_model.LassoLars(alpha=0.001)
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
residual = np.asarray(data_test_y.T)-np.asarray(y_pred.T)
residual = residual[0]
v = np.var(residual)
print("Variance = ", v)
print("jarque_bera_Normality = ", stats.jarque_bera(residual));
plt.hist(residual, bins=15)
plt.ylabel('No of times')
plt.show()
histo(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
histo(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
histo(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
histo(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
histo(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
histo(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""The histograms above are approximately bell-shaped (normally distributed), so the residuals of the regression model satisfy the normality assumption"""
"""### 3.Histogram, variance and Jarque_Bera test of Residuals for Linear Regression Model"""
def histo(data_train_x,data_train_y,data_test_x,data_test_y):
reg = linear_model.LinearRegression()
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
residual = np.asarray(data_test_y.T)-np.asarray(y_pred.T)
residual = residual[0]
v = np.var(residual)
print("Variance = ", v)
print("jarque_bera_Normality = ", stats.jarque_bera(residual))
plt.hist(residual, bins=15)
plt.ylabel('No of times')
plt.show()
histo(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
histo(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
histo(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
histo(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
histo(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
histo(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""## 2.Scatter Plot of Residual and y_pred for Lasso Regression"""
def lsso_reg_check(data_train_x,data_train_y,data_test_x,data_test_y):
#error
reg = linear_model.LassoLars(alpha=0.0001)
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
residual = np.asarray(data_test_y.T)-np.asarray(y_pred.T)
residual = residual[0]
plt.scatter(y_pred,residual)
plt.show()
lsso_reg_check(X_tcs_train1,Y_tcs_train1,X_tcs_test1,Y_tcs_test1)
lsso_reg_check(X_tcs_train2,Y_tcs_train2,X_tcs_test2,Y_tcs_test2)
lsso_reg_check(X_infy_train1,Y_infy_train1,X_infy_test1,Y_infy_test1)
lsso_reg_check(X_infy_train2,Y_infy_train2,X_infy_test2,Y_infy_test2)
lsso_reg_check(X_niftyit_train1,Y_niftyit_train1,X_niftyit_test1,Y_niftyit_test1)
lsso_reg_check(X_niftyit_train2,Y_niftyit_train2,X_niftyit_test2,Y_niftyit_test2)
"""Since we do not find any specific pattern in any of the above graphs, we can conclude the errors are normal
## 1. Scatter Plot of Residual and y_pred for Linear Regression
"""
def regression_Model_check(data_train_x,data_train_y,data_test_x,data_test_y):
reg = linear_model.LinearRegression()
reg.fit(data_train_x,data_train_y)
y_pred = reg.predict(data_test_x)
residual =
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015 NetCharm <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
from __future__ import unicode_literals
from __future__ import division
import os
import sys
from io import StringIO
import codecs
from panda3d.core import *
from panda3d.egg import *
from panda3d.ode import *
from panda3d.bullet import ZUp
from panda3d.bullet import BulletWorld
from panda3d.bullet import BulletDebugNode
from panda3d.bullet import BulletPlaneShape
from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletSoftBodyNode
from panda3d.bullet import BulletPlaneShape
from panda3d.bullet import BulletBoxShape
from panda3d.bullet import BulletSphereShape
from panda3d.bullet import BulletCylinderShape
from panda3d.bullet import BulletCapsuleShape
from panda3d.bullet import BulletConeShape
from panda3d.bullet import BulletCharacterControllerNode
from panda3d.bullet import BulletConeTwistConstraint
from direct.actor.Actor import Actor
from .common import *
from pymeshio import pmx
from pymeshio.pmx import reader as pmxReader
DEBUG = True
DEBUG = False
# Get the location of the 'py' file I'm running:
CWD = os.path.abspath(sys.path[0])
def pmxLoad(f_pmx):
model = None
if os.path.isfile(f_pmx):
model = pmxReader.read_from_file(f_pmx)
return(model)
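# A minimal usage sketch (the model path is a hypothetical example):
#   model = pmxLoad(os.path.join(CWD, 'models', 'miku.pmx'))
#   if model is not None:
#       pmxInfo(model, screen=True)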
def pmxInfo(model, screen=False):
lines = []
if isinstance(model, pmx.Model):
lines.append(u'path : %s' % model.path)
lines.append(u'version : %s' % model.version)
lines.append(u'name(jpn) : %s' % model.name)
lines.append(u'name(eng) : %s' % model.english_name.strip())
lines.append(u'comment(jpn) : \n{0}\n{1}\n{0}'.format('-'*80, model.comment))
lines.append(u'comment(eng) : \n{0}\n{1}\n{0}'.format('-'*80, model.english_comment.strip()))
lines.append(u'='*80)
lines.append(u'textures : Total {1}.\n{0}'.format('-'*80, len(model.textures)))
idx = 0
for texture in model.textures:
lines.append('%4d : %s' % (idx, texture))
idx += 1
lines.append(u'='*80)
lines.append(u'materials : Total {1}.\n{0}'.format('-'*80, len(model.materials)))
idx = 0
for mat in model.materials:
if idx != 0: lines.append('')
idx += 1
lines.append(u' name(jpn) : %s' % mat.name)
lines.append(u' name(eng) : %s' % mat.english_name.strip())
lines.append(u' diffuse : (%s, %s, %s)' % (mat.diffuse_color.r, mat.diffuse_color.g, mat.diffuse_color.b))
lines.append(u' alpha : %.2f' % mat.alpha)
lines.append(u' specular : (%s, %s, %s), %.2f' % (mat.specular_color.r, mat.specular_color.g, mat.specular_color.b, mat.specular_factor))
lines.append(u' ambient : (%s, %s, %s)' % (mat.ambient_color.r, mat.ambient_color.g, mat.ambient_color.b))
lines.append(u' flag : %s' % mat.flag)
lines.append(u' edge : (%s, %s, %s, %s), %.2f' % (mat.edge_color.r, mat.edge_color.g, mat.edge_color.b, mat.edge_color.a, mat.edge_size))
lines.append(u' texture : %4d' % mat.texture_index)
lines.append(u' sphere : %4d, %4d' % (mat.sphere_mode, mat.sphere_texture_index))
lines.append(u' toon : %4d, %4d' % (mat.toon_sharing_flag, mat.toon_texture_index))
lines.append(u' comment : %s' % mat.comment.strip())
lines.append(u' vertexs : %4d' % mat.vertex_count)
lines.append(u'='*80)
lines.append(u'bones : Total {1}.\n{0}'.format('-'*80, len(model.bones)))
idx = 0
for bone in model.bones:
if idx != 0: lines.append('')
idx += 1
lines.append(u' name(jpn) : %s' % bone.name)
lines.append(u' name(eng) : %s' % bone.english_name.strip())
lines.append(u' position : %s' % str(bone.position.to_tuple()))
lines.append(u' parent_index : %4d' % bone.parent_index)
lines.append(u' layer : %4d' % bone.layer)
lines.append(u' flag : %4d' % bone.flag)
lines.append(u' tail : %4d, %s' % (bone.tail_index, bone.tail_position.to_tuple()))
lines.append(u' effect : %4d, %.4f' % (bone.effect_index, bone.effect_factor))
lines.append(u' fixed_axis : %s' % str(bone.fixed_axis.to_tuple()))
lines.append(u' local_vector : x%s, z%s' % (bone.local_x_vector.to_tuple(), bone.local_z_vector.to_tuple()))
lines.append(u' external_key : %4d' % bone.external_key)
if bone.ik:
ik_links = list(map(lambda link: (link.bone_index, link.limit_angle, link.limit_max.to_tuple(), link.limit_min.to_tuple()), bone.ik.link))
lines.append(u' ik : %.4f, %s, %4d, %4d' % (bone.ik.limit_radian, ik_links[:5], bone.ik.loop, bone.ik.target_index ))
else:
lines.append(u' ik : %s' % u'')
lines.append(u' index : %4d' % bone.index)
lines.append(u'='*80)
lines.append(u'morphs : Total {1}.\n{0}'.format('-'*80, len(model.morphs)))
idx = 0
for morph in model.morphs:
if idx != 0: lines.append('')
idx += 1
lines.append(u' name(jpn) : %s' % morph.name)
lines.append(u' name(eng) : %s' % morph.english_name.strip())
lines.append(u' panel : %4d' % morph.panel)
lines.append(u' morph_type : %4d' % morph.morph_type)
ol = list(map(lambda offset: (offset.morph_index, offset.value) if isinstance(offset, pmx.GroupMorphData) else (offset.vertex_index, offset.position_offset.to_tuple()), morph.offsets))
lines.append(u' offsets : %4d, %s' % (len(morph.offsets), ol[:5]))
lines.append(u'='*80)
lines.append(u'display_slot : Total {1}.\n{0}'.format('-'*80, len(model.display_slots)))
idx = 0
for slot in model.display_slots:
if idx != 0: lines.append('')
idx += 1
lines.append(u' name(jpn) : %s' % slot.name)
lines.append(u' name(eng) : %s' % slot.english_name.strip())
lines.append(u' references : %4d, %s' % (len(slot.references), str(slot.references)))
lines.append(u' special_flag : %4d' % slot.special_flag)
lines.append(u'='*80)
lines.append(u'rigidbodies : Total {1}.\n{0}'.format('-'*80, len(model.rigidbodies)))
idx = 0
for rigidbody in model.rigidbodies:
if idx != 0: lines.append('')
idx += 1
lines.append(u' name(jpn) : %s' % rigidbody.name)
lines.append(u' name(eng) : %s' % rigidbody.english_name.strip())
lines.append(u' bone_index : %4d' % rigidbody.bone_index)
lines.append(u' collision_group : %4d' % rigidbody.collision_group)
lines.append(u' no_collision_group : %4d' % rigidbody.no_collision_group)
lines.append(u' shape : %4d, %s, %s, %s' % (rigidbody.shape_type, rigidbody.shape_size.to_tuple(), rigidbody.shape_position.to_tuple(), rigidbody.shape_rotation.to_tuple()))
lines.append(u' param : %4d, %.4f, %.4f, %.4f, %.4f' % (rigidbody.param.mass, rigidbody.param.linear_damping, rigidbody.param.angular_damping, rigidbody.param.restitution, rigidbody.param.friction))
lines.append(u' mode : %4d' % rigidbody.mode)
lines.append(u'='*80)
lines.append(u'joints : Total {1}.\n{0}'.format('-'*80, len(model.joints)))
idx = 0
for joint in model.joints:
if idx != 0: lines.append('')
idx += 1
lines.append(u' name(jpn) : %s' % joint.name)
lines.append(u' name(eng) : %s' % joint.english_name.strip())
lines.append(u' joint_type : %4d' % joint.joint_type)
lines.append(u' rigidbody_index : %4d, %4d' % (joint.rigidbody_index_a, joint.rigidbody_index_b))
lines.append(u' position : %s' % str(joint.position.to_tuple()))
lines.append(u' rotation : %s' % str(joint.rotation.to_tuple()))
lines.append(u' translation_limit : %s, %s' % (joint.translation_limit_min.to_tuple(), joint.translation_limit_max.to_tuple()))
lines.append(u' rotation_limit : %s, %s' % (joint.rotation_limit_min.to_tuple(), joint.rotation_limit_max.to_tuple()))
lines.append(u' spring_constant : %s, %s' % (joint.spring_constant_translation.to_tuple(), joint.spring_constant_rotation.to_tuple()))
lines.append(u'='*80)
lines.append(u'vertices : Total {1}.\n{0}'.format('-'*80, len(model.vertices)))
idx = 0
for vertex in model.vertices:
if idx != 0: lines.append('')
idx += 1
lines.append(u' position : %s' % str(vertex.position.to_tuple()))
lines.append(u' normal : %s' % str(vertex.normal.to_tuple()))
lines.append(u' uv : %s' % str(vertex.uv.to_tuple()))
lines.append(u' deform : %s' % str(vertex.deform))
lines.append(u' edge_factor : %.4f' % vertex.edge_factor)
lines.append(u'='*80)
lines.append(u'indices : Total {1}.\n{0}'.format('-'*80, len(model.indices)))
idx = 0
for indic in model.indices:
lines.append(u' %8d : %8d' % (idx, indic))
idx += 1
lines.append(u'='*80)
if screen:
for line in lines:
print(line)
return(lines)
pass
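# Hedged usage sketch (illustration only): pmxInfo() returns the report as a list
# of unicode lines, so it can also be written to disk with the codecs module
# imported above. The output path is a hypothetical example.
#
#     with codecs.open('model_info.txt', 'w', encoding='utf-8') as fout:
#         fout.write(u'\n'.join(pmxInfo(model)))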
def pmx2p3d(pmx_model):
return(loadPmxBody(pmx_model))
def loadPmxBody(pmx_model, alpha=True):
modelPath = os.path.dirname(pmx_model.path)
#
# load textures
#
# textures = TextureCollection()
texIndex = 0
textures = []
for tex in pmx_model.textures:
tex_path = os.path.normpath(os.path.join(os.path.dirname(pmx_model.path), tex))
tex_path = os.path.normcase(tex_path)
log(u'Loading Texture %03d: %s' % (texIndex, tex), force=True)
texture = loadTexture(tex_path, model_path=modelPath)
textures.append(texture)
if texture:
log(u'Loaded Texture %03d: %s' % (texIndex, tex))
else:
log(u'Texture Failed %03d: %s' % (texIndex, tex), force=True)
texIndex += 1
modelName = pmx_model.name
#
# load vertices(vertex list)
#
formatArray = GeomVertexArrayFormat()
# formatArray.addColumn(InternalName.make(str("vertex")), 3, Geom.NTFloat32, Geom.C_vector)
# formatArray.addColumn(InternalName.make(str("normal")), 3, Geom.NTFloat32, Geom.C_vector)
# formatArray.addColumn(InternalName.make(str("color")), 4, Geom.NTFloat32, Geom.C_color)
# formatArray.addColumn(InternalName.make(str("texcoord")), 2, Geom.NTFloat32, Geom.C_texcoord)
formatArray.addColumn(InternalName.make(str("edge_factor")), 1, Geom.NTFloat32, Geom.COther)
formatArray.addColumn(InternalName.make(str("drawFlag")), 1, Geom.NTUint8, Geom.COther)
formatArray.addColumn(InternalName.make(str("index")), 1, Geom.NTUint32, Geom.CIndex)
# formatArray.addColumn(InternalName.make(str("morph")), 1, Geom.NTFloat32, Geom.CMorphDelta)
# print(formatArray)
format = GeomVertexFormat(GeomVertexFormat.getV3n3cpt2())
format.addArray(formatArray)
format = GeomVertexFormat.registerFormat(format)
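# Added note: besides the standard v3n3cpt2 columns (vertex, normal, color,
# texcoord), the extra array registered above carries three custom per-vertex
# columns -- 'edge_factor' (outline width), 'drawFlag' and the original vertex
# 'index' -- presumably consumed later by the edge/morph handling code.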
vdata = GeomVertexData(modelName, format, Geom.UHDynamic)
vdata.setNumRows(len(pmx_model.vertices))
vertex = GeomVertexWriter(vdata, str('vertex'))
normal = GeomVertexWriter(vdata, 'normal')
color = GeomVertexWriter(vdata, 'color')
texcoord = GeomVertexWriter(vdata, 'texcoord')
edge = GeomVertexWriter(vdata, 'edge_factor')
index = GeomVertexWriter(vdata, 'index')
idx = 0
skins = dict()
log(u'Loading Vertices : %d' % (len(pmx_model.vertices)), force=True)
for v in pmx_model.vertices:
vertex.addData3f(V2V(v.position))
normal.addData3f(N2N(v.normal))
color.addData4f(.95, .95, .95, 1)
texcoord.addData2f(v.uv.x, v.uv.y)
edge.addData1f(v.edge_factor)
index.addData1i(idx)
idx += 1
#
# bind vertex to bone
#
deform = v.deform
if isinstance(deform, pmx.Bdef1):
bone0 = pmx_model.bones[deform.index0]
if not bone0.name in skins:
skins[bone0.name] = []
skins[bone0.name].append(v)
pass
elif isinstance(deform, pmx.Bdef2):
bone0 = pmx_model.bones[deform.index0]
bone1 = pmx_model.bones[deform.index1]
if not bone0.name in skins:
skins[bone0.name] = []
skins[bone0.name].append((idx, v))
if not bone1.name in skins:
skins[bone1.name] = []
skins[bone1.name].append((idx, v))
pass
elif isinstance(deform, pmx.Bdef4):
bone0 = pmx_model.bones[deform.index0]
bone1 = pmx_model.bones[deform.index1]
bone2 = pmx_model.bones[deform.index2]
bone3 = pmx_model.bones[deform.index3]
if not bone0.name in skins:
skins[bone0.name] = []
skins[bone0.name].append((idx, v))
if not bone1.name in skins:
skins[bone1.name] = []
skins[bone1.name].append((idx, v))
if not bone2.name in skins:
skins[bone2.name] = []
skins[bone2.name].append((idx, v))
if not bone3.name in skins:
skins[bone3.name] = []
skins[bone3.name].append((idx, v))
pass
elif isinstance(deform, pmx.Sdef):
bone0 = pmx_model.bones[deform.index0]
bone1 = pmx_model.bones[deform.index1]
if not bone0.name in skins:
skins[bone0.name] = []
skins[bone0.name].append((idx, v))
if not bone1.name in skins:
skins[bone1.name] = []
skins[bone1.name].append((idx, | |
:rtype:
:param newMasterKey: bytes
"""
return self.C_.call_getter_raw('setMasterKey', {'newMasterKey': newMasterKey}, expect_ec=ts4_expect_ec)
def M_setMasterKey(self, newMasterKey, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.setMasterKey method call
:param newMasterKey: bytes
"""
_r_ = self.C_.call_method('setMasterKey', {'newMasterKey': newMasterKey}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_setMasterKey(self, newMasterKey, ts4_expect_ec=0):
"""
Wrapper for D4Test.setMasterKey signed method call
:param newMasterKey: bytes
"""
_r_ = self.C_.call_method_signed('setMasterKey', {'newMasterKey': newMasterKey}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def createAuction(self, name, duration, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.createAuction
:rtype:
:param name: bytes
:param duration: uint8
"""
if ts4_sign:
return self.S_createAuction(name, duration, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_createAuction(name, duration, ts4_expect_ec=ts4_expect_ec)
def G_createAuction(self, name, duration, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.createAuction getter
:rtype:
:param name: bytes
:param duration: uint8
"""
return self.C_.call_getter('createAuction', {'name': name, 'duration': duration}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_createAuction(self, name, duration, ts4_expect_ec=0):
"""
Wrapper for D4Test.createAuction raw getter
:rtype:
:param name: bytes
:param duration: uint8
"""
return self.C_.call_getter_raw('createAuction', {'name': name, 'duration': duration}, expect_ec=ts4_expect_ec)
def M_createAuction(self, name, duration, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.createAuction method call
:param name: bytes
:param duration: uint8
"""
_r_ = self.C_.call_method('createAuction', {'name': name, 'duration': duration}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_createAuction(self, name, duration, ts4_expect_ec=0):
"""
Wrapper for D4Test.createAuction signed method call
:param name: bytes
:param duration: uint8
"""
_r_ = self.C_.call_method_signed('createAuction', {'name': name, 'duration': duration}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
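# Hedged usage sketch (illustration only; not generated wrapper code).
# Every ABI function gets the same family of wrappers: the plain name dispatches
# to S_ (signed call) or M_ (plain method call) depending on ts4_sign, while
# G_/R_ invoke the decoded/raw getters. Assuming a deployed wrapper instance
# named `d4` (hypothetical), a signed call could look like:
#
#     d4.createAuction(name=b'example', duration=7, ts4_sign=True)
#     # equivalent to d4.S_createAuction(b'example', 7)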
def makeBid(self, auction, data, hash, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.makeBid
:rtype:
:param auction: address
:param data: bytes
:param hash: uint256
"""
if ts4_sign:
return self.S_makeBid(auction, data, hash, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_makeBid(auction, data, hash, ts4_expect_ec=ts4_expect_ec)
def G_makeBid(self, auction, data, hash, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.makeBid getter
:rtype:
:param auction: address
:param data: bytes
:param hash: uint256
"""
return self.C_.call_getter('makeBid', {'auction': auction, 'data': data, 'hash': hash}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_makeBid(self, auction, data, hash, ts4_expect_ec=0):
"""
Wrapper for D4Test.makeBid raw getter
:rtype:
:param auction: address
:param data: bytes
:param hash: uint256
"""
return self.C_.call_getter_raw('makeBid', {'auction': auction, 'data': data, 'hash': hash}, expect_ec=ts4_expect_ec)
def M_makeBid(self, auction, data, hash, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.makeBid method call
:param auction: address
:param data: bytes
:param hash: uint256
"""
_r_ = self.C_.call_method('makeBid', {'auction': auction, 'data': data, 'hash': hash}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_makeBid(self, auction, data, hash, ts4_expect_ec=0):
"""
Wrapper for D4Test.makeBid signed method call
:param auction: address
:param data: bytes
:param hash: uint256
"""
_r_ = self.C_.call_method_signed('makeBid', {'auction': auction, 'data': data, 'hash': hash}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def revealBid(self, auction, amount, nonce, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.revealBid
:rtype:
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
if ts4_sign:
return self.S_revealBid(auction, amount, nonce, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_revealBid(auction, amount, nonce, ts4_expect_ec=ts4_expect_ec)
def G_revealBid(self, auction, amount, nonce, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.revealBid getter
:rtype:
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
return self.C_.call_getter('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_revealBid(self, auction, amount, nonce, ts4_expect_ec=0):
"""
Wrapper for D4Test.revealBid raw getter
:rtype:
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
return self.C_.call_getter_raw('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, expect_ec=ts4_expect_ec)
def M_revealBid(self, auction, amount, nonce, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.revealBid method call
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
_r_ = self.C_.call_method('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_revealBid(self, auction, amount, nonce, ts4_expect_ec=0):
"""
Wrapper for D4Test.revealBid signed method call
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
_r_ = self.C_.call_method_signed('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def bidRevealComplete(self, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.bidRevealComplete
:rtype:
"""
if ts4_sign:
return self.S_bidRevealComplete(ts4_expect_ec=ts4_expect_ec)
else:
return self.M_bidRevealComplete(ts4_expect_ec=ts4_expect_ec)
def G_bidRevealComplete(self, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.bidRevealComplete getter
:rtype:
"""
return self.C_.call_getter('bidRevealComplete', {}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_bidRevealComplete(self, ts4_expect_ec=0):
"""
Wrapper for D4Test.bidRevealComplete raw getter
:rtype:
"""
return self.C_.call_getter_raw('bidRevealComplete', {}, expect_ec=ts4_expect_ec)
def M_bidRevealComplete(self, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.bidRevealComplete method call
"""
_r_ = self.C_.call_method('bidRevealComplete', {}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_bidRevealComplete(self, ts4_expect_ec=0):
"""
Wrapper for D4Test.bidRevealComplete signed method call
"""
_r_ = self.C_.call_method_signed('bidRevealComplete', {}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def finalize(self, auction, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.finalize
:rtype:
:param auction: address
"""
if ts4_sign:
return self.S_finalize(auction, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_finalize(auction, ts4_expect_ec=ts4_expect_ec)
def G_finalize(self, auction, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.finalize getter
:rtype:
:param auction: address
"""
return self.C_.call_getter('finalize', {'auction': auction}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_finalize(self, auction, ts4_expect_ec=0):
"""
Wrapper for D4Test.finalize raw getter
:rtype:
:param auction: address
"""
return self.C_.call_getter_raw('finalize', {'auction': auction}, expect_ec=ts4_expect_ec)
def M_finalize(self, auction, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.finalize method call
:param auction: address
"""
_r_ = self.C_.call_method('finalize', {'auction': auction}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_finalize(self, auction, ts4_expect_ec=0):
"""
Wrapper for D4Test.finalize signed method call
:param auction: address
"""
_r_ = self.C_.call_method_signed('finalize', {'auction': auction}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def queryCert(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.queryCert
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_queryCert(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_queryCert(target, ts4_expect_ec=ts4_expect_ec)
def G_queryCert(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.queryCert getter
:rtype:
:param target: address
"""
return self.C_.call_getter('queryCert', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_queryCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.queryCert raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('queryCert', {'target': target}, expect_ec=ts4_expect_ec)
def M_queryCert(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.queryCert method call
:param target: address
"""
_r_ = self.C_.call_method('queryCert', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_queryCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.queryCert signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('queryCert', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def queryAuct(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.queryAuct
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_queryAuct(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_queryAuct(target, ts4_expect_ec=ts4_expect_ec)
def G_queryAuct(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.queryAuct getter
:rtype:
:param target: address
"""
return self.C_.call_getter('queryAuct', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_queryAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.queryAuct raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('queryAuct', {'target': target}, expect_ec=ts4_expect_ec)
def M_queryAuct(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.queryAuct method call
:param target: address
"""
_r_ = self.C_.call_method('queryAuct', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_queryAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.queryAuct signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('queryAuct', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def forgetCert(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.forgetCert
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_forgetCert(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_forgetCert(target, ts4_expect_ec=ts4_expect_ec)
def G_forgetCert(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.forgetCert getter
:rtype:
:param target: address
"""
return self.C_.call_getter('forgetCert', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_forgetCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.forgetCert raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('forgetCert', {'target': target}, expect_ec=ts4_expect_ec)
def M_forgetCert(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.forgetCert method call
:param target: address
"""
_r_ = self.C_.call_method('forgetCert', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_forgetCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.forgetCert signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('forgetCert', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def forgetAuct(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Test.forgetAuct
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_forgetAuct(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_forgetAuct(target, ts4_expect_ec=ts4_expect_ec)
def G_forgetAuct(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Test.forgetAuct getter
:rtype:
:param target: address
"""
return self.C_.call_getter('forgetAuct', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_forgetAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4Test.forgetAuct raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('forgetAuct', {'target': target}, expect_ec=ts4_expect_ec)
def M_forgetAuct(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Test.forgetAuct method | |
HOH C 2 . ? 42.015 -9.763 40.020 1.00 28.41 ? 430 HOH A O 1
HETATM 4027 O O . HOH C 2 . ? 34.753 -6.728 26.420 1.00 17.50 ? 431 HOH A O 1
HETATM 4028 O O . HOH C 2 . ? 40.204 -13.387 2.304 1.00 35.05 ? 432 HOH A O 1
HETATM 4029 O O . HOH C 2 . ? 40.326 -13.386 14.564 1.00 35.68 ? 433 HOH A O 1
HETATM 4030 O O . HOH C 2 . ? 36.026 2.030 26.654 1.00 20.56 ? 434 HOH A O 1
HETATM 4031 O O . HOH C 2 . ? 67.855 4.717 10.548 1.00 31.96 ? 435 HOH A O 1
HETATM 4032 O O . HOH C 2 . ? 24.415 -16.781 27.551 1.00 27.69 ? 436 HOH A O 1
HETATM 4033 O O . HOH C 2 . ? 31.229 0.240 15.213 1.00 23.32 ? 437 HOH A O 1
HETATM 4034 O O . HOH C 2 . ? 74.819 -11.198 17.014 1.00 27.95 ? 438 HOH A O 1
HETATM 4035 O O . HOH C 2 . ? 65.092 -0.877 -6.188 1.00 43.68 ? 439 HOH A O 1
HETATM 4036 O O . HOH C 2 . ? 54.398 8.362 7.156 1.00 16.20 ? 440 HOH A O 1
HETATM 4037 O O . HOH C 2 . ? 32.266 -2.075 21.360 1.00 22.26 ? 441 HOH A O 1
HETATM 4038 O O . HOH C 2 . ? 62.509 9.285 11.948 1.00 34.77 ? 442 HOH A O 1
HETATM 4039 O O . HOH C 2 . ? 63.725 -15.996 22.309 1.00 30.58 ? 443 HOH A O 1
HETATM 4040 O O . HOH C 2 . ? 44.467 -12.903 12.296 1.00 14.34 ? 444 HOH A O 1
HETATM 4041 O O . HOH C 2 . ? 50.827 -20.582 40.251 1.00 33.03 ? 445 HOH A O 1
HETATM 4042 O O . HOH C 2 . ? 32.595 -11.583 26.692 1.00 25.75 ? 446 HOH A O 1
HETATM 4043 O O . HOH C 2 . ? 46.647 8.635 8.350 1.00 31.35 ? 447 HOH A O 1
HETATM 4044 O O . HOH C 2 . ? 75.712 1.174 12.765 1.00 35.94 ? 448 HOH A O 1
HETATM 4045 O O . HOH C 2 . ? 74.791 -2.404 12.943 1.00 33.37 ? 449 HOH A O 1
HETATM 4046 O O . HOH C 2 . ? 50.178 6.938 31.739 1.00 46.41 ? 450 HOH A O 1
HETATM 4047 O O . HOH C 2 . ? 43.554 -0.181 -5.835 1.00 45.16 ? 451 HOH A O 1
HETATM 4048 O O . HOH C 2 . ? 50.425 11.707 20.039 1.00 46.11 ? 452 HOH A O 1
HETATM 4049 O O . HOH C 2 . ? 44.346 -13.037 6.818 1.00 23.23 ? 453 HOH A O 1
HETATM 4050 O O . HOH C 2 . ? 26.022 -20.088 21.558 1.00 42.91 ? 454 HOH A O 1
HETATM 4051 O O . HOH C 2 . ? 46.046 -12.756 8.957 1.00 16.84 ? 455 HOH A O 1
HETATM 4052 O O . HOH C 2 . ? 30.797 -22.101 35.044 1.00 32.63 ? 456 HOH A O 1
HETATM 4053 O O . HOH C 2 . ? 42.801 3.387 8.988 1.00 25.12 ? 457 HOH A O 1
HETATM 4054 O O . HOH C 2 . ? 48.506 -3.052 36.122 1.00 19.69 ? 458 HOH A O 1
HETATM 4055 O O . HOH C 2 . ? 31.621 -13.643 34.842 1.00 30.62 ? 459 HOH A O 1
HETATM 4056 O O . HOH C 2 . ? 32.591 -7.678 25.466 1.00 33.19 ? 460 HOH A O 1
HETATM 4057 O O . HOH C 2 . ? 33.983 -27.338 19.217 1.00 36.29 ? 461 HOH A O 1
HETATM 4058 O O . HOH C 2 . ? 54.123 -14.365 33.503 1.00 30.95 ? 462 HOH A O 1
HETATM 4059 O O . HOH C 2 . ? 44.410 1.936 -1.763 1.00 29.44 ? 463 HOH A O 1
HETATM 4060 O O . HOH C 2 . ? 49.455 -16.289 0.938 1.00 27.23 ? 464 HOH A O 1
HETATM 4061 O O . HOH C 2 . ? 33.957 -3.714 30.175 1.00 37.14 ? 465 HOH A O 1
HETATM 4062 O O . HOH C 2 . ? 34.675 -4.420 27.832 1.00 19.91 ? 466 HOH A O 1
HETATM 4063 O O . HOH C 2 . ? 63.363 -7.690 31.631 1.00 38.37 ? 467 HOH A O 1
HETATM 4064 O O . HOH C 2 . ? 42.333 9.482 11.231 1.00 38.55 ? 468 HOH A O 1
HETATM 4065 O O . HOH C 2 . ? 29.325 -10.015 22.699 1.00 35.81 ? 469 HOH A O 1
HETATM 4066 O O . HOH C 2 . ? 32.918 -0.927 23.763 1.00 43.65 ? 470 HOH A O 1
HETATM 4067 O O . HOH C 2 . ? 34.656 -11.074 32.059 1.00 29.70 ? 471 HOH A O 1
HETATM 4068 O O . HOH C 2 . ? 64.245 -14.488 24.178 1.00 34.71 ? 472 HOH A O 1
HETATM 4069 O O . HOH C 2 . ? 54.814 9.422 20.108 1.00 35.12 ? 473 HOH A O 1
HETATM 4070 O O . HOH C 2 . ? 74.370 -6.586 18.203 1.00 28.88 ? 474 HOH A O 1
HETATM 4071 O O . HOH C 2 . ? 53.821 -15.493 -0.159 1.00 29.77 ? 475 HOH A O 1
HETATM 4072 O O . HOH C 2 . ? 42.089 -24.036 16.997 1.00 28.65 ? 476 HOH A O 1
HETATM 4073 O O . HOH C 2 . ? 30.774 -4.323 -1.282 1.00 35.40 ? 477 HOH A O 1
HETATM 4074 O O . HOH D 2 . ? 60.670 -31.196 7.322 1.00 17.75 ? 301 HOH B O 1
HETATM 4075 O O . HOH D 2 . ? 61.243 -35.695 10.786 1.00 26.43 ? 302 HOH B O 1
HETATM 4076 O O . HOH D 2 . ? 49.572 -33.263 -13.858 1.00 32.37 ? 303 HOH B O 1
HETATM 4077 O O . HOH D 2 . ? 65.787 -20.387 19.311 1.00 42.18 ? 304 HOH B O 1
HETATM 4078 O O . HOH D 2 . ? 63.810 -43.907 28.667 1.00 36.68 ? 305 HOH B O 1
HETATM 4079 O O . HOH D 2 . ? 33.608 -31.475 13.049 1.00 38.54 ? 306 HOH B O 1
HETATM 4080 O O . HOH D 2 . ? 35.848 -32.208 -15.876 1.00 35.13 ? 307 HOH B O 1
HETATM 4081 O O . HOH D 2 . ? 43.975 -26.015 -10.444 1.00 19.63 ? 308 HOH B O 1
HETATM 4082 O O . HOH D 2 . ? 49.564 -42.819 -9.413 1.00 18.61 ? 309 HOH B O 1
HETATM 4083 O O . HOH D 2 . ? 60.709 -34.193 34.906 1.00 26.55 ? 310 HOH B O 1
HETATM 4084 O O . HOH D 2 . ? 59.885 -23.374 13.975 1.00 17.94 ? 311 HOH B O 1
HETATM 4085 O O . HOH D 2 . ? 70.886 -35.073 16.981 1.00 24.82 ? 312 HOH B O 1
HETATM 4086 O O . HOH D 2 . ? 32.381 -34.203 6.641 1.00 21.63 ? 313 HOH B O 1
HETATM 4087 O O | |
<filename>niftynet/engine/application_driver.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
This module defines a general procedure for running applications.
Example usage::
app_driver = ApplicationDriver()
app_driver.initialise_application(system_param, input_data_param)
app_driver.run_application()
``system_param`` and ``input_data_param`` should be generated using:
``niftynet.utilities.user_parameters_parser.run()``
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import itertools
import tensorflow as tf
from blinker import signal
from niftynet.engine.application_factory import ApplicationFactory
from niftynet.engine.application_iteration import IterationMessage
from niftynet.engine.application_variables import \
CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES
from niftynet.engine.application_variables import \
GradientsCollector, OutputsCollector, global_vars_init_or_restore
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.io.image_sets_partitioner import TRAIN, VALID, INFER
from niftynet.io.misc_io import get_latest_subfolder, touch_folder
from niftynet.layer.bn import BN_COLLECTION
from niftynet.utilities.util_common import set_cuda_device, traverse_nested
FILE_PREFIX = 'model.ckpt'
# pylint: disable=too-many-instance-attributes
class ApplicationDriver(object):
"""
This class initialises an application by building a TF graph,
and maintaining a session and coordinator. It controls the
starting/stopping of an application. Applications should be
implemented by inheriting ``niftynet.application.base_application``
to be compatible with this driver.
"""
# pylint: disable=too-many-instance-attributes
pre_train_iter = signal('pre_train_iter')
post_train_iter = signal('post_train_iter')
pre_validation_iter = signal('pre_validation_iter')
post_validation_iter = signal('post_validation_iter')
pre_infer_iter = signal('pre_infer_iter')
post_infer_iter = signal('post_infer_iter')
post_training = signal('post_training')
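# Hedged example (added comment): the blinker signals declared above can be
# observed from outside the driver; the handler below is a hypothetical name.
#
#     def _on_post_training(iter_msg):
#         tf.logging.info('training finished at iteration %s', iter_msg.current_iter)
#
#     ApplicationDriver.post_training.connect(_on_post_training)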
def __init__(self):
self.app = None
self.graph = tf.Graph()
self.saver = None
self.is_training = True
self.num_threads = 0
self.num_gpus = 0
self.model_dir = None
self.summary_dir = None
self.session_prefix = None
self.max_checkpoints = 2
self.save_every_n = 0
self.tensorboard_every_n = -1
self.validation_every_n = -1
self.validation_max_iter = 1
self.initial_iter = 0
self.final_iter = 0
self._coord = tf.train.Coordinator()
self._init_op = None
self._data_partitioner = None
self.outputs_collector = None
self.gradients_collector = None
self.console = None
self.tensorboard = None
self.model_saver = None
def initialise_application(self, workflow_param, data_param):
"""
This function receives all parameters from the user config file
and creates an instance of the application.
:param workflow_param: a dictionary of user parameters,
keys correspond to sections in the config file
:param data_param: a dictionary of input image parameters,
keys correspond to data properties to be used by image_reader
:return:
"""
try:
system_param = workflow_param.get('SYSTEM', None)
net_param = workflow_param.get('NETWORK', None)
train_param = workflow_param.get('TRAINING', None)
infer_param = workflow_param.get('INFERENCE', None)
app_param = workflow_param.get('CUSTOM', None)
except AttributeError:
tf.logging.fatal('parameters should be dictionaries')
raise
assert os.path.exists(system_param.model_dir), \
'Model folder does not exist: {}'.format(system_param.model_dir)
self.is_training = (system_param.action == "train")
# hardware-related parameters
self.num_threads = max(system_param.num_threads, 1) \
if self.is_training else 1
self.num_gpus = system_param.num_gpus \
if self.is_training else min(system_param.num_gpus, 1)
set_cuda_device(system_param.cuda_devices)
# set output TF model folders
self.model_dir = touch_folder(
os.path.join(system_param.model_dir, 'models'))
self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX)
# set training params.
if self.is_training:
assert train_param, 'training parameters not specified'
summary_root = os.path.join(system_param.model_dir, 'logs')
self.summary_dir = get_latest_subfolder(
summary_root,
create_new=train_param.starting_iter == 0)
self.initial_iter = train_param.starting_iter
self.final_iter = max(train_param.max_iter, self.initial_iter)
self.save_every_n = train_param.save_every_n
self.tensorboard_every_n = train_param.tensorboard_every_n
self.max_checkpoints = \
max(train_param.max_checkpoints, self.max_checkpoints)
self.gradients_collector = GradientsCollector(
n_devices=max(self.num_gpus, 1))
self.validation_every_n = train_param.validation_every_n
if self.validation_every_n > 0:
self.validation_max_iter = max(self.validation_max_iter,
train_param.validation_max_iter)
action_param = train_param
else: # set inference params.
assert infer_param, 'inference parameters not specified'
self.initial_iter = infer_param.inference_iter
action_param = infer_param
self.outputs_collector = OutputsCollector(
n_devices=max(self.num_gpus, 1))
# create an application instance
assert app_param, 'application specific param. not specified'
app_module = ApplicationDriver._create_app(app_param.name)
self.app = app_module(net_param, action_param, system_param.action)
# initialise data input
data_partitioner = ImageSetsPartitioner()
# clear the cached file lists
data_partitioner.reset()
do_new_partition = \
self.is_training and self.initial_iter == 0 and \
(not os.path.isfile(system_param.dataset_split_file)) and \
(train_param.exclude_fraction_for_validation > 0 or
train_param.exclude_fraction_for_inference > 0)
data_fractions = None
if do_new_partition:
assert train_param.exclude_fraction_for_validation > 0 or \
self.validation_every_n <= 0, \
'validation_every_n is set to {}, ' \
'but train/validation splitting not available,\nplease ' \
'check "exclude_fraction_for_validation" in the config ' \
'file (current config value: {}).'.format(
self.validation_every_n,
train_param.exclude_fraction_for_validation)
data_fractions = (train_param.exclude_fraction_for_validation,
train_param.exclude_fraction_for_inference)
if data_param:
data_partitioner.initialise(
data_param=data_param,
new_partition=do_new_partition,
ratios=data_fractions,
data_split_file=system_param.dataset_split_file)
if data_param and self.is_training and self.validation_every_n > 0:
assert data_partitioner.has_validation, \
'validation_every_n is set to {}, ' \
'but train/validation splitting not available.\nPlease ' \
'check dataset partition list {} ' \
'(remove file to generate a new dataset partition). ' \
'Or set validation_every_n to -1.'.format(
self.validation_every_n, system_param.dataset_split_file)
# initialise readers
self.app.initialise_dataset_loader(
data_param, app_param, data_partitioner)
self._data_partitioner = data_partitioner
# pylint: disable=not-context-manager
with self.graph.as_default(), tf.name_scope('Sampler'):
self.app.initialise_sampler()
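# Hedged sketch (added comment): the *_param arguments are namespace-like objects
# normally produced by niftynet.utilities.user_parameters_parser.run(). A
# hand-rolled stand-in for experimentation could look roughly like the following;
# the attribute names are taken from the accesses above, while the values and the
# net/train/custom/data placeholders are hypothetical.
#
#     from types import SimpleNamespace
#     system_param = SimpleNamespace(
#         model_dir='./model_dir', action='train', num_threads=2, num_gpus=1,
#         cuda_devices='0', dataset_split_file='./dataset_split.csv')
#     workflow_param = {'SYSTEM': system_param, 'NETWORK': net_param,
#                       'TRAINING': train_param, 'CUSTOM': custom_param}
#     driver = ApplicationDriver()
#     driver.initialise_application(workflow_param, data_param)
#     driver.run_application()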
def _run_sampler_threads(self, session=None):
"""
Get samplers from application and try to run sampler threads.
Note: override the app.get_sampler() method to return None to bypass
this step.
:param session: TF session used to fill
tf.placeholders with sampled data
:return:
"""
if session is None:
return
if self._coord is None:
return
if self.num_threads <= 0:
return
try:
samplers = self.app.get_sampler()
for sampler in traverse_nested(samplers):
if sampler is None:
continue
sampler.run_threads(session, self._coord, self.num_threads)
tf.logging.info('Filling queues (this can take a few minutes)')
except (NameError, TypeError, AttributeError, IndexError):
tf.logging.fatal(
"samplers not running, pop_batch_op operations "
"are blocked.")
raise
def run_application(self):
"""
Initialise a TF graph, connect data sampler and network within
the graph context, run training loops or inference loops.
The training loop terminates when ``self.final_iter`` is reached.
The inference loop terminates when there is no more
image sample to be processed from image reader.
:return:
"""
config = ApplicationDriver._tf_config()
with tf.Session(config=config, graph=self.graph) as session:
# start samplers' threads
self._run_sampler_threads(session=session)
self.graph = self._create_graph(self.graph)
# check app variables are initialised and ready to start
self.app.check_initialisations()
# initialise network trainable parameters
self._rand_init_or_restore_vars(session)
start_time = time.time()
loop_status = {}
try:
# iteratively run the graph
if self.is_training:
self.model_saver = ModelSaver(session, self.saver,
self.save_every_n,
self.session_prefix)
loop_status['current_iter'] = self.initial_iter
self._training_loop(session, loop_status)
else:
loop_status['all_saved_flag'] = False
self._inference_loop(session, loop_status)
except KeyboardInterrupt:
tf.logging.warning('User cancelled application')
except tf.errors.OutOfRangeError:
if loop_status.get('all_saved_flag', None) is not None:
# reached the end of inference Dataset
loop_status['all_saved_flag'] = True
except RuntimeError:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_traceback, file=sys.stdout)
finally:
tf.logging.info('Cleaning up...')
if self.is_training:
# saving model at the last iteration
iter_msg = IterationMessage()
iter_msg.current_iter = loop_status.get('current_iter', -1)
self.post_training.send(iter_msg)
elif not loop_status.get('all_saved_flag', None):
tf.logging.warning('stopped early, incomplete loops')
tf.logging.info('stopping sampling threads')
self.app.stop()
tf.logging.info(
"%s stopped (time in second %.2f).",
type(self.app).__name__, (time.time() - start_time))
# pylint: disable=not-context-manager
def _create_graph(self, graph=tf.Graph()):
"""
TensorFlow graph is only created within this function.
"""
assert isinstance(graph, tf.Graph)
main_device = self._device_string(0, is_worker=False)
# start constructing the graph, handling training and inference cases
with graph.as_default(), tf.device(main_device):
# initialise network, these are connected in
# the context of multiple gpus
self.app.initialise_network()
self.app.add_validation_flag()
# for data parallelism --
# defining and collecting variables from multiple devices
bn_ops = None
for gpu_id in range(0, max(self.num_gpus, 1)):
worker_device = self._device_string(gpu_id, is_worker=True)
scope_string = 'worker_{}'.format(gpu_id)
with tf.name_scope(scope_string) as scope:
with tf.device(worker_device):
# setup network for each of the multiple devices
self.app.connect_data_and_network(
self.outputs_collector,
self.gradients_collector)
if self.is_training:
# batch norm statistics from the last device
bn_ops = tf.get_collection(BN_COLLECTION, scope)
# assemble all training operations
if self.is_training and self.gradients_collector:
updates_op = []
# batch normalisation moving averages operation
if bn_ops:
updates_op.extend(bn_ops)
# combine them with model parameter updating operation
with tf.name_scope('ApplyGradients'):
with graph.control_dependencies(updates_op):
self.app.set_network_gradient_op(
self.gradients_collector.gradients)
# initialisation operation
with tf.name_scope('Initialization'):
self._init_op = global_vars_init_or_restore()
with tf.name_scope('MergedOutputs'):
self.outputs_collector.finalise_output_op()
# saving operation
self.saver = tf.train.Saver(max_to_keep=self.max_checkpoints,
save_relative_paths=True)
# no more operation definitions after this point
tf.Graph.finalize(graph)
return graph
def _rand_init_or_restore_vars(self, sess):
"""
Randomly initialising all trainable variables defined in session,
or loading checkpoint files as variable initialisations.
"""
tf.logging.info('starting from iter %d', self.initial_iter)
if self.is_training and self.initial_iter == 0:
sess.run(self._init_op)
tf.logging.info('Parameters from random initialisations ...')
return
# check model's folder
assert os.path.exists(self.model_dir), \
"Model folder not found {}, please check" \
"config parameter: model_dir".format(self.model_dir)
# check model's file
ckpt_state = tf.train.get_checkpoint_state(self.model_dir)
if ckpt_state is None:
tf.logging.warning(
"%s/checkpoint not found, please check "
"config parameter: model_dir", self.model_dir)
if self.initial_iter > 0:
checkpoint = '{}-{}'.format(self.session_prefix, self.initial_iter)
else:
try:
checkpoint = ckpt_state.model_checkpoint_path
assert checkpoint, 'checkpoint path not found ' \
'in {}/checkpoints'.format(self.model_dir)
self.initial_iter = int(checkpoint.rsplit('-')[-1])
tf.logging.info('set initial_iter to %d based '
'on checkpoints', self.initial_iter)
except (ValueError, AttributeError):
tf.logging.fatal(
'failed to get iteration number '
'from checkpoint path, please set '
'inference_iter or starting_iter to a positive integer')
raise
# restore session
tf.logging.info('Accessing %s ...', checkpoint)
try:
self.saver.restore(sess, checkpoint)
except tf.errors.NotFoundError:
tf.logging.fatal(
'checkpoint %s not found or variables to restore do not '
'match the current application graph', checkpoint)
raise
def interleaved_iteration_generator(self):
""" This generator yields a sequence of training and validation
iterations """
train_iters = iter_generator(range(self.initial_iter + 1,
self.final_iter + 1), TRAIN)
for train_iter_msg in train_iters:
self.app.set_iteration_update(train_iter_msg)
yield train_iter_msg
if train_iter_msg.current_iter > 0 and\
self.validation_every_n > 0 and \
| |
the base jit
flags = [
"-c", item,
"-v", "q" # only log from the jit.
]
flags += altjit_replay_flags
# Change the working directory to the core root we will call SuperPMI from.
# This is done to allow libcoredistools to be loaded correctly on unix
# as the LoadLibrary path will be relative to the current directory.
with ChangeDir(self.coreclr_args.core_root):
async def create_one_artifact(jit_path: str, location: str, flags) -> str:
command = [self.superpmi_path] + flags + [jit_path, mch_file]
item_path = os.path.join(location, "{}{}".format(item, extension))
with open(item_path, 'w') as file_handle:
logging.debug("%sGenerating %s", print_prefix, item_path)
logging.debug("%sInvoking: %s", print_prefix, " ".join(command))
proc = await asyncio.create_subprocess_shell(" ".join(command), stdout=file_handle, stderr=asyncio.subprocess.PIPE, env=env)
await proc.communicate()
with open(item_path, 'r') as file_handle:
generated_txt = file_handle.read()
return generated_txt
# Generate diff and base JIT dumps
base_txt = await create_one_artifact(self.base_jit_path, base_location, flags + base_option_flags_for_diff_artifact)
diff_txt = await create_one_artifact(self.diff_jit_path, diff_location, flags + diff_option_flags_for_diff_artifact)
if base_txt != diff_txt:
jit_differences_queue.put_nowait(item)
################################################################################################ end of create_replay_artifacts()
diff_items = []
for item in self.diff_mcl_contents:
diff_items.append(item)
logging.info("Creating dasm files: %s %s", base_asm_location, diff_asm_location)
subproc_helper = AsyncSubprocessHelper(diff_items, verbose=True)
subproc_helper.run_to_completion(create_replay_artifacts, self, mch_file, asm_complus_vars_full_env, text_differences, base_asm_location, diff_asm_location, ".dasm")
if self.coreclr_args.diff_jit_dump:
logging.info("Creating JitDump files: %s %s", base_dump_location, diff_dump_location)
subproc_helper.run_to_completion(create_replay_artifacts, self, mch_file, jit_dump_complus_vars_full_env, jit_dump_differences, base_dump_location, diff_dump_location, ".txt")
logging.info("Differences found. To replay SuperPMI use:")
logging.info("")
for var, value in asm_complus_vars.items():
print_platform_specific_environment_vars(logging.INFO, self.coreclr_args, var, value)
logging.info("%s %s -c ### %s %s", self.superpmi_path, " ".join(altjit_replay_flags), self.diff_jit_path, mch_file)
logging.info("")
if self.coreclr_args.diff_jit_dump:
logging.info("To generate JitDump with SuperPMI use:")
logging.info("")
for var, value in jit_dump_complus_vars.items():
print_platform_specific_environment_vars(logging.INFO, self.coreclr_args, var, value)
logging.info("%s %s -c ### %s %s", self.superpmi_path, " ".join(altjit_replay_flags), self.diff_jit_path, mch_file)
logging.info("")
logging.debug("Method numbers with binary differences:")
for item in self.diff_mcl_contents:
logging.debug(item)
logging.debug("")
if base_metrics is not None and diff_metrics is not None:
base_bytes = int(base_metrics["Diffed code bytes"])
diff_bytes = int(diff_metrics["Diffed code bytes"])
logging.info("Total bytes of base: {}".format(base_bytes))
logging.info("Total bytes of diff: {}".format(diff_bytes))
delta_bytes = diff_bytes - base_bytes
logging.info("Total bytes of delta: {} ({:.2%} of base)".format(delta_bytes, delta_bytes / base_bytes))
try:
current_text_diff = text_differences.get_nowait()
except:
current_text_diff = None
logging.info("Generated asm is located under %s %s", base_asm_location, diff_asm_location)
if current_text_diff is not None:
logging.info("Textual differences found in generated asm.")
# Find jit-analyze on PATH, if it exists, then invoke it.
ran_jit_analyze = False
path_var = os.environ.get("PATH")
if path_var is not None:
jit_analyze_file = "jit-analyze.exe" if platform.system() == "Windows" else "jit-analyze"
jit_analyze_path = find_file(jit_analyze_file, path_var.split(os.pathsep))
if jit_analyze_path is not None:
# It appears we have a built jit-analyze on the path, so try to run it.
md_summary_file = os.path.join(asm_root_dir, "summary.md")
summary_file_info = ( mch_file, md_summary_file )
all_md_summary_files.append(summary_file_info)
command = [ jit_analyze_path, "--md", md_summary_file, "-r", "--base", base_asm_location, "--diff", diff_asm_location ]
if self.coreclr_args.retainOnlyTopFiles:
command += [ "--retainOnlyTopFiles" ]
if self.coreclr_args.metrics:
command += [ "--metrics", ",".join(self.coreclr_args.metrics) ]
elif base_bytes is not None and diff_bytes is not None:
command += [ "--override-total-base-metric", str(base_bytes), "--override-total-diff-metric", str(diff_bytes) ]
run_and_log(command, logging.INFO)
ran_jit_analyze = True
if not ran_jit_analyze:
logging.info("jit-analyze not found on PATH. Generate a diff analysis report by building jit-analyze from https://github.com/dotnet/jitutils and running:")
logging.info(" jit-analyze -r --base %s --diff %s", base_asm_location, diff_asm_location)
else:
logging.warning("No textual differences. Is this an issue with coredistools?")
if self.coreclr_args.diff_jit_dump:
try:
current_jit_dump_diff = jit_dump_differences.get_nowait()
except:
current_jit_dump_diff = None
logging.info("Generated JitDump is located under %s %s", base_dump_location, diff_dump_location)
if current_jit_dump_diff is not None:
logging.info("Textual differences found in generated JitDump.")
else:
logging.warning("No textual differences found in generated JitDump. Is this an issue with coredistools?")
if base_metrics is not None and diff_metrics is not None:
missing_base = int(base_metrics["Missing compiles"])
missing_diff = int(diff_metrics["Missing compiles"])
total_contexts = int(base_metrics["Successful compiles"]) + int(base_metrics["Failing compiles"])
if missing_base > 0 or missing_diff > 0:
logging.warning("Warning: SuperPMI encountered missing data during the diff. The diff summary printed above may be misleading.")
logging.warning("Missing with base JIT: {}. Missing with diff JIT: {}. Total contexts: {}.".format(missing_base, missing_diff, total_contexts))
################################################################################################ end of processing asm diffs (if is_nonzero_length_file(diff_mcl_file)...
if not self.coreclr_args.skip_cleanup:
if os.path.isfile(fail_mcl_file):
os.remove(fail_mcl_file)
fail_mcl_file = None
if os.path.isfile(base_metrics_summary_file):
os.remove(base_metrics_summary_file)
base_metrics_summary_file = None
if os.path.isfile(diff_metrics_summary_file):
os.remove(diff_metrics_summary_file)
diff_metrics_summary_file = None
################################################################################################ end of for mch_file in self.mch_files
# Report the overall results summary of the asmdiffs run
logging.info("Asm diffs summary:")
# Construct an overall Markdown summary file.
if len(all_md_summary_files) > 0 and not self.coreclr_args.diff_with_release:
overall_md_summary_file = create_unique_file_name(self.coreclr_args.spmi_location, "diff_summary", "md")
if not os.path.isdir(self.coreclr_args.spmi_location):
os.makedirs(self.coreclr_args.spmi_location)
if os.path.isfile(overall_md_summary_file):
os.remove(overall_md_summary_file)
with open(overall_md_summary_file, "w") as write_fh:
for summary_file_info in all_md_summary_files:
summary_mch = summary_file_info[0]
summary_mch_filename = os.path.basename(summary_mch) # Display just the MCH filename, not the full path
summary_file = summary_file_info[1]
with open(summary_file, "r") as read_fh:
write_fh.write("## " + summary_mch_filename + ":\n\n")
shutil.copyfileobj(read_fh, write_fh)
logging.info(" Summary Markdown file: %s", overall_md_summary_file)
# Report the set of MCH files with asm diffs and replay failures.
if len(files_with_replay_failures) != 0:
logging.info(" Replay failures in %s MCH files:", len(files_with_replay_failures))
for file in files_with_replay_failures:
logging.info(" %s", file)
if len(files_with_asm_diffs) == 0:
logging.info(" No asm diffs")
else:
logging.info(" Asm diffs in %s MCH files:", len(files_with_asm_diffs))
for file in files_with_asm_diffs:
logging.info(" %s", file)
return result
################################################################################################ end of replay_with_asm_diffs()
################################################################################
# SuperPMI Replay/TP diff
################################################################################
class SuperPMIReplayThroughputDiff:
""" SuperPMI Replay throughput diff class
Notes:
The object is responsible for replaying the MCH files given to the
instance of the class and measuring the throughput (TP) of the two passed JITs.
"""
def __init__(self, coreclr_args, mch_files, base_jit_path, diff_jit_path):
""" Constructor
Args:
coreclr_args (CoreclrArguments) : parsed args
mch_files (list) : list of MCH files to replay
base_jit_path (str) : path to baseline clrjit
diff_jit_path (str) : path to diff clrjit
"""
self.base_jit_path = base_jit_path
self.diff_jit_path = diff_jit_path
self.mch_files = mch_files
self.superpmi_path = determine_superpmi_tool_path(coreclr_args)
self.pin_path = get_pin_exe_path(coreclr_args)
self.inscount_pintool_path = get_inscount_pintool_path(coreclr_args)
self.coreclr_args = coreclr_args
self.diff_mcl_contents = None
############################################################################
# Instance Methods
############################################################################
def replay_with_throughput_diff(self):
""" Replay SuperPMI collections measuring throughput differences.
Returns:
(bool) True on success; False otherwise
"""
# Set up some settings we'll use below.
target_flags = []
if self.coreclr_args.arch != self.coreclr_args.target_arch:
target_flags += [ "-target", self.coreclr_args.target_arch ]
base_option_flags = []
if self.coreclr_args.base_jit_option:
for o in self.coreclr_args.base_jit_option:
base_option_flags += "-jitoption", o
diff_option_flags = []
if self.coreclr_args.diff_jit_option:
for o in self.coreclr_args.diff_jit_option:
diff_option_flags += "-jit2option", o
base_jit_compiler_version = determine_clrjit_compiler_version(self.base_jit_path)
diff_jit_compiler_version = determine_clrjit_compiler_version(self.diff_jit_path)
if base_jit_compiler_version != diff_jit_compiler_version:
logging.warning("Warning: Different compilers used for base and diff JITs. Results may be misleading.")
logging.warning(" Base JIT's compiler: {}".format(base_jit_compiler_version))
logging.warning(" Diff JIT's compiler: {}".format(diff_jit_compiler_version))
tp_diffs = []
with TempDir(None, self.coreclr_args.skip_cleanup) as temp_location:
logging.debug("")
logging.debug("Temp Location: %s", temp_location)
logging.debug("")
for mch_file in self.mch_files:
logging.info("Running throughput diff of %s", mch_file)
base_metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_base_metrics.csv")
diff_metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_diff_metrics.csv")
pin_options = [
"-follow_execv", # attach to child processes
"-t", self.inscount_pintool_path, "-quiet",
]
flags = [
"-applyDiff",
"-baseMetricsSummary", base_metrics_summary_file, # Instruction counts are stored in these
"-diffMetricsSummary", diff_metrics_summary_file,
]
flags += target_flags
flags += base_option_flags
flags += diff_option_flags
if not self.coreclr_args.sequential and not self.coreclr_args.compile:
flags += [ "-p" ]
if self.coreclr_args.break_on_assert:
flags += [ "-boa" ]
if self.coreclr_args.break_on_error:
flags += [ "-boe" ]
if self.coreclr_args.compile:
flags += [ "-c", self.coreclr_args.compile ]
if self.coreclr_args.spmi_log_file is not None:
flags += [ "-w", self.coreclr_args.spmi_log_file ]
if self.coreclr_args.error_limit is not None:
flags += ["-failureLimit", self.coreclr_args.error_limit]
# Change the working directory to the Core_Root we will call SuperPMI from.
# This is done to allow libcoredistools to be loaded correctly on unix
# as the loadlibrary path will be relative to the current directory.
with ChangeDir(self.coreclr_args.core_root):
command = [self.pin_path] + pin_options + ["--"] + [self.superpmi_path] + flags + [self.base_jit_path, self.diff_jit_path, mch_file]
return_code = run_and_log(command)
logging.debug("return_code: %s", return_code)
base_metrics = read_csv_metrics(base_metrics_summary_file)
diff_metrics = read_csv_metrics(diff_metrics_summary_file)
if base_metrics is not None and diff_metrics is not None:
base_instructions = int(base_metrics["Diff executed instructions"])
diff_instructions = int(diff_metrics["Diff executed instructions"])
logging.info("Total instructions executed by base: {}".format(base_instructions))
logging.info("Total instructions executed by diff: {}".format(diff_instructions))
delta_instructions = diff_instructions - base_instructions
logging.info("Total instructions executed delta: {} ({:.2%} of base)".format(delta_instructions, delta_instructions / base_instructions))
tp_diffs.append((os.path.basename(mch_file), base_instructions, diff_instructions))
else:
logging.warning("No metric files present?")
if not self.coreclr_args.skip_cleanup:
if os.path.isfile(base_metrics_summary_file):
os.remove(base_metrics_summary_file)
base_metrics_summary_file = None
if os.path.isfile(diff_metrics_summary_file):
os.remove(diff_metrics_summary_file)
diff_metrics_summary_file = None
################################################################################################ end of for mch_file in self.mch_files
# Report the overall results summary of the tpdiff run
logging.info("Throughput diff summary:")
# Construct an overall Markdown summary file.
if len(tp_diffs) > | |
<filename>code/convLSTM(2).py
# Imports
import datetime
import pickle
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from evaluation import *
def load_data(sequenceFile, labelFile):
"""
Load the data: read the locally pickled .pkl files and split them into
training, validation and test sets.
:param sequenceFile: pickled .pkl file containing the input feature sequences
:param labelFile: pickled .pkl file containing the ground-truth labels
:return: train_set_x, train_set_y, valid_set_x, valid_set_y,
test_set_x, test_set_y -- inputs and labels of the training,
validation and test sets
"""
sequences = pickle.load(open(sequenceFile, 'rb')) # read the input sequence file
labels = pickle.load(open(labelFile, 'rb')) # read the ground-truth label file
labels = np.squeeze(np.reshape(labels, (-1, 1)))
seq_len = np.array([len(seq) for seq in sequences]) # length of each sample
# Randomly draw the same proportion from the positive and negative samples and split into training, test and validation sets
dataSize = len(labels) # total number of samples in the dataset
ind_p = np.squeeze(np.argwhere(labels == 1)) # indices of the positive samples
ind_f = np.squeeze(np.setdiff1d(np.arange(dataSize), ind_p)) # indices of the negative samples
per_ind_p = np.random.permutation(ind_p) # random permutation of the positive indices, used to randomise the split
per_ind_f = np.random.permutation(ind_f) # random permutation of the negative indices, used to randomise the split
ind1 = int(0.1 * len(ind_p))
ind2 = int(0.1 * len(ind_f))
# slice the shuffled index arrays so the train/validation/test split is random
testP_ind = per_ind_p[:ind1]
valP_ind = per_ind_p[ind1:ind1 * 2]
trainP_ind = per_ind_p[ind1 * 2:]
testf_ind = per_ind_f[:ind2]
valf_ind = per_ind_f[ind2:ind2 * 2]
trainf_ind = per_ind_f[ind2 * 2:]
test_indices = np.random.permutation(np.concatenate((testP_ind, testf_ind))) # test set sample indices
valid_indices = np.random.permutation(np.concatenate((valP_ind, valf_ind))) # validation set sample indices
train_indices = np.random.permutation(np.concatenate((trainP_ind, trainf_ind))) # training set sample indices
"""
#ind = np.random.permutation(dataSize) # generate a random permutation to index the dataset
nTest = int(0.10 * dataSize) # use 10% of the dataset as the test set
nValid = int(0.10 * dataSize) # use 10% of the dataset as the validation set
test_indices = ind[:nTest] # indices of the test set
valid_indices = ind[nTest:nTest + nValid] # indices of the validation set
train_indices = ind[nTest + nValid:] # indices of the training set
"""
train_set_x = sequences[train_indices] # training-set features selected by the training indices
train_set_y = labels[train_indices] # training-set labels selected by the training indices
test_set_x = sequences[test_indices] # test-set features selected by the test indices
test_set_y = labels[test_indices] # test-set labels selected by the test indices
valid_set_x = sequences[valid_indices] # validation-set features selected by the validation indices
valid_set_y = labels[valid_indices] # validation-set labels selected by the validation indices
return train_set_x, train_set_y, valid_set_x, valid_set_y, test_set_x, test_set_y
def one_hot(labels, n_class=2):
""" One-hot 编码 """
expansion = np.eye(n_class)
y = expansion[:, labels].T
assert y.shape[1] == n_class, "Wrong number of labels!"
return y
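# Illustrative example: one_hot(np.array([0, 1, 1])) returns
# [[1., 0.], [0., 1.], [0., 1.]] -- row i is the one-hot encoding of labels[i].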
def get_batches(X, y, batch_size=100):
""" Return a generator for batches """
n_batches = len(X) // batch_size
X, y = X[:n_batches * batch_size], y[:n_batches * batch_size]
# Loop over batches and yield
for b in range(0, len(X), batch_size):
yield X[b:b + batch_size], y[b:b + batch_size]
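# Minimal usage sketch (hypothetical sizes): with 3,500 training samples and batch_size=100 the
# trailing partial batch is dropped and 35 (x, y) pairs of 100 samples each are yielded, e.g.
#   for x_batch, y_batch in get_batches(X_tr, y_tr, batch_size=100):
#       ...  # run one training step per batch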
def model(dataFile, labelFile, lstm_size, lstm_layers, batch_size, seq_len, gap_len, learning_rate, epochs, keep_prob):
X_tr, lab_tr, X_vld, lab_vld, X_test, lab_test = load_data(dataFile, labelFile)
X_tr = X_tr[:, -(seq_len + gap_len):-gap_len, :]
X_vld = X_vld[:, -(seq_len + gap_len):-gap_len, :]
X_test = X_test[:, -(seq_len + gap_len):-gap_len, :]
y_tr = one_hot(np.squeeze(lab_tr))
y_vld = one_hot(np.squeeze(lab_vld))
y_test = one_hot(np.squeeze(lab_test))
# Fixed
n_classes = 2
n_channels = 51
# train_set, valid_set, test_set = load_data(dataFile, labelFile, bucketing=True)
graph = tf.Graph()
# Construct placeholders
with graph.as_default():
inputs_ = tf.placeholder(tf.float32, [None, None, n_channels], name='inputs')
labels_ = tf.placeholder(tf.float32, [None, n_classes], name='labels')
keep_prob_ = tf.placeholder(tf.float32, name='keep')
learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')
# Convolutional layers
# filters is the number of convolution filters (integer, the dimensionality of the output space)
# with graph.as_default():
# (batch, 128, 9) --> (batch, 128, 10)
with tf.name_scope('conv1'):
conv1 = tf.layers.conv1d(inputs=inputs_, filters=100, kernel_size=6, strides=1,
padding='same', activation=tf.nn.relu)
# n_ch = n_channels * 2 # n_ch is the number of features after the convolution
n_ch = 100
with tf.name_scope('LSTM_in'):
# with graph.as_default():
# Construct the LSTM inputs and LSTM cells
lstm_in = tf.transpose(conv1, [1, 0, 2]) # reshape into (seq_len, batch, channels)
lstm_in = tf.reshape(lstm_in, [-1, n_ch]) # Now (seq_len*N, n_channels)
# To cells
"""
tf.layers.dense()
This layer implements the operation: outputs = activation(inputs * kernel + bias), where activation is the
activation function passed as the activation argument (if not None), kernel is a weights matrix created by
the layer, and bias is a bias vector created by the layer (only if use_bias is True).
A bias term is added by default (use_bias=True).
kernel_initializer: Initializer function for the weight matrix. If None (default),
weights are initialized using the default initializer used by tf.get_variable.
"""
lstm_in = tf.layers.dense(lstm_in, lstm_size, activation=None) # or tf.nn.relu, tf.nn.sigmoid, tf.nn.tanh?
# Open up the tensor into a list of seq_len pieces
lstm_in = tf.split(lstm_in, seq_len, 0)
"""
BasicRNNCell is the most basic RNN cell.
Arguments: num_units: number of units in the RNN layer
input_size (this argument is deprecated)
activation: activation function applied to the internal state
reuse: Python boolean describing whether to reuse variables in an existing scope
# DropoutWrapper applies dropout to a cell; output_keep_prob sets the keep probability for its outputs
"""
# Add LSTM layers
with tf.name_scope('RNN'):
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob_)
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
# with graph.as_default():
"""
Single-layer RNN: tf.contrib.rnn.static_rnn:
Arguments: inputs is a length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements
dtype is the data type of the initial state and the expected output
Returns: A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input), or a nested
tuple of such elements.
- state is the final state
Dropout can also be applied inside the RNN (see DropoutWrapper above)
"""
outputs, final_state = tf.contrib.rnn.static_rnn(cell, lstm_in, dtype=tf.float32,
initial_state=initial_state)
"""
final_outputs = final_state[layer_size - 1][1] # take the second tensor of the last layer's final state tuple as the output
preds = tf.matmul(final_outputs, weight['out']) + bias['out']
probs = tf.sigmoid(preds)
"""
# We only need the last output tensor to pass into a classifier
logits = tf.layers.dense(outputs[-1], n_classes, name='logits')
with tf.name_scope('cross_entropy'):
# Cost function and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_))
tf.summary.scalar("loss", cost)
# optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost) # No grad clipping
with tf.name_scope('train'):
# Grad clipping
# tf.train.AdamOptimizer uses epsilon = 1e-08 by default
train_op = tf.train.AdamOptimizer(learning_rate_)
gradients = train_op.compute_gradients(cost)
"""
tf.clip_by_value:
Given a tensor t, this operation returns a tensor of the same type and shape as t with its values clipped to
clip_value_min and clip_value_max. Any values less than clip_value_min are set to clip_value_min.
Any values greater than clip_value_max are set to clip_value_max.
"""
with tf.name_scope('clip_value'):
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
optimizer = train_op.apply_gradients(capped_gradients)
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_pred = tf.equal(tf.cast(tf.greater(preds, 0.5), tf.float32), tf.cast(labels_, tf.float32))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
with tf.name_scope('accuracy'):
# Accuracy
y_pred = tf.argmax(logits, 1)
y_true = tf.argmax(labels_, 1)
with tf.name_scope('correct_prediction'):
correct_pred = tf.equal(y_pred, y_true) # tf.argmax returns the index of the largest value along the given axis
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy') # tf.cast changes the data type of a tensor
tf.summary.scalar("accuracy", accuracy)
"""
with tf.name_scope('PR'):
precision, TPFP = tf.metrics.precision_at_thresholds(labels_, logits, name='precision',thresholds=201)
recall, TPFN = tf.metrics.recall_at_thresholds(labels_, logits, name='recall',thresholds=201)
TP, TP_update = tf.metrics.true_positives_at_thresholds(labels_, logits, name='TP',thresholds=201)
TN, TN_updata = tf.metrics.true_negatives_at_thresholds(labels_, logits, name='TN',thresholds=201)
FP, FP_update = tf.metrics.false_positives_at_thresholds(labels_, logits, name='FP',thresholds=201)
FN, FN_update = tf.metrics.false_negatives_at_thresholds(labels_, logits, name='FN',thresholds=201)
summary_lib.pr_curve_raw_data(name='prc', true_positive_counts=TP, false_positive_counts=FP,
true_negative_counts=TN, false_negative_counts=FN, precision=precision,
recall=recall, display_name='PR Curve',num_thresholds=201)
summary_lib.scalar('f1_max', tf.reduce_max(2.0 * precision * recall / tf.maximum(precision + recall, 1e-7)))
_, update_op = summary_lib.pr_curve_streaming_op('foo',
predictions=logits,
labels=labels_,
num_thresholds=201)
"""
validation_acc = []
validation_loss = []
train_acc = []
train_loss = []
with graph.as_default():
saver = tf.train.Saver()
merged = tf.summary.merge_all()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config, graph=graph)
with session as sess:
train_writer = tf.summary.FileWriter(LOGDIR + hparam_str,
session.graph)
test_writer = tf.summary.FileWriter('F://301/github/septic-shock/code/test')
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
# Initialize
state = sess.run(initial_state)
# Loop over batches
for x, y in get_batches(X_tr, y_tr, batch_size):
# Feed dictionary
feed = {inputs_: x, labels_: y, keep_prob_: keep_prob,
initial_state: state, learning_rate_: learning_rate}
loss, output, _, state, acc, summary = sess.run([cost, outputs, optimizer, final_state, accuracy,
merged], feed_dict=feed)
train_acc.append(acc)
train_loss.append(loss)
train_writer.add_summary(summary, e)
"""
# Print at each 5 iters
if (iteration % 5 == 0):
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {:d}".format(iteration),
"Train loss: {:6f}".format(loss),
"Train acc: {:.6f}".format(acc))
"""
# Evaluate loss, accuracy and the other metrics on the validation set every 34 iterations.
# 34 is used because the training set contains 34 batches, so 34 iterations correspond to one full pass over the training data.
if (iteration % 34 == 0):
# Initiate for validation set
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
val_acc_ = []
val_loss_ = []
val_pred = []
val_true = []
for x_v, y_v in get_batches(X_vld, y_vld, batch_size):
# Feed
feed = {inputs_: x_v, labels_: y_v, keep_prob_: 1.0, initial_state: val_state}
# Loss
loss_v, state_v, y_pred_v, y_true_v, acc_v, = sess.run(
[cost, final_state, y_pred, y_true, accuracy], feed_dict=feed)
val_pred.append(y_pred_v)
val_true.append(y_true_v)
val_acc_.append(acc_v)
val_loss_.append(loss_v)
# test_writer.add_summary(summary, e)
auc_v = auc(val_true, val_pred)
precision_v, recall_v = precision_recall(val_true, val_pred)
# Print info
"""
print("Validation: Epoch: {}/{}".format(e, epochs),
"Iteration: {:d}".format(iteration),
"loss: {:6f}".format(np.mean(val_loss_)),
"acc: {:.6f}".format(np.mean(val_acc_)),
"auc:{:.2f}".format(auc_v),
"precision: {:.4f}".format(precision_v),
"recall: {:.4f}".format(recall_v))
"""
# Store
validation_acc.append(np.mean(val_acc_))
validation_loss.append(np.mean(val_loss_))
# Iterate
iteration += 1
saver.save(sess, "checkpoints-crnn/har.ckpt")
"""
print("length of Output is:{}".format(len(output)),
"and output 1 is:{}".format(output[-1].shape))
print("length of state is:{}".format(len(state)),
"and state 1 is:{}".format(state[0][-1].shape))
#print("output[-1] == state[1][-1] is: {}".format((output[-1]==state[1][-1])))
"""
# Plot training and test loss
t = np.arange(1, iteration)
plt.figure(figsize=(12, 6))
plt.title('hparam:' + hparam_str)
plt.subplot(121)
plt.plot(t, np.array(train_loss), 'r-', t[t % 34 == 0], np.array(validation_loss), 'b*')
plt.ylim(-0.1, 1.0)
plt.xlabel("iteration")
plt.ylabel("Loss")
plt.legend(['train', 'validation'], loc='upper right')
# plt.show()
# Plot Accuracies
# plt.figure(figsize=(6, 6))
plt.subplot(122)
plt.plot(t, np.array(train_acc), 'r-', t[t % 34 == 0], validation_acc, 'b*')
plt.ylim(0.6, 1.01)
plt.xlabel("iteration")
plt.ylabel("Accuray")
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
test_acc = []
with tf.Session(graph=graph) as sess:
# Restore
saver.restore(sess, tf.train.latest_checkpoint('checkpoints-crnn'))
y_true_test = []
y_pred_test = []
for x_t, y_t in get_batches(X_test, y_test, batch_size):
feed = | |
import medicationdispense
return medicationdispense.MedicationDispense(jsondict)
if "MedicationDispenseSubstitution" == resource_type:
from . import medicationdispense
return medicationdispense.MedicationDispenseSubstitution(jsondict)
if "MedicationDispensePerformer" == resource_type:
from . import medicationdispense
return medicationdispense.MedicationDispensePerformer(jsondict)
if "MedicationKnowledge" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledge(jsondict)
if "MedicationKnowledgeKinetics" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeKinetics(jsondict)
if "MedicationKnowledgeRegulatory" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatory(jsondict)
if "MedicationKnowledgeDrugCharacteristic" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeDrugCharacteristic(jsondict)
if "MedicationKnowledgePackaging" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgePackaging(jsondict)
if "MedicationKnowledgeMedicineClassification" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeMedicineClassification(jsondict)
if "MedicationKnowledgeAdministrationGuidelines" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeAdministrationGuidelines(jsondict)
if "MedicationKnowledgeMonitoringProgram" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeMonitoringProgram(jsondict)
if "MedicationKnowledgeCost" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeCost(jsondict)
if "MedicationKnowledgeIngredient" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeIngredient(jsondict)
if "MedicationKnowledgeMonograph" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeMonograph(jsondict)
if "MedicationKnowledgeRelatedMedicationKnowledge" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRelatedMedicationKnowledge(jsondict)
if "MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics(jsondict)
if "MedicationKnowledgeAdministrationGuidelinesDosage" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeAdministrationGuidelinesDosage(jsondict)
if "MedicationKnowledgeRegulatoryMaxDispense" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatoryMaxDispense(jsondict)
if "MedicationKnowledgeRegulatorySchedule" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatorySchedule(jsondict)
if "MedicationKnowledgeRegulatorySubstitution" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatorySubstitution(jsondict)
if "MedicationRequest" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequest(jsondict)
if "MedicationRequestSubstitution" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequestSubstitution(jsondict)
if "MedicationRequestDispenseRequest" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequestDispenseRequest(jsondict)
if "MedicationRequestDispenseRequestInitialFill" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequestDispenseRequestInitialFill(jsondict)
if "MedicationStatement" == resource_type:
from . import medicationstatement
return medicationstatement.MedicationStatement(jsondict)
if "MedicinalProduct" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProduct(jsondict)
if "MedicinalProductSpecialDesignation" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductSpecialDesignation(jsondict)
if "MedicinalProductManufacturingBusinessOperation" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductManufacturingBusinessOperation(jsondict)
if "MedicinalProductName" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductName(jsondict)
if "MedicinalProductNameCountryLanguage" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductNameCountryLanguage(jsondict)
if "MedicinalProductNameNamePart" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductNameNamePart(jsondict)
if "MedicinalProductAuthorization" == resource_type:
from . import medicinalproductauthorization
return medicinalproductauthorization.MedicinalProductAuthorization(jsondict)
if "MedicinalProductAuthorizationProcedure" == resource_type:
from . import medicinalproductauthorization
return medicinalproductauthorization.MedicinalProductAuthorizationProcedure(jsondict)
if "MedicinalProductAuthorizationJurisdictionalAuthorization" == resource_type:
from . import medicinalproductauthorization
return medicinalproductauthorization.MedicinalProductAuthorizationJurisdictionalAuthorization(jsondict)
if "MedicinalProductContraindication" == resource_type:
from . import medicinalproductcontraindication
return medicinalproductcontraindication.MedicinalProductContraindication(jsondict)
if "MedicinalProductContraindicationOtherTherapy" == resource_type:
from . import medicinalproductcontraindication
return medicinalproductcontraindication.MedicinalProductContraindicationOtherTherapy(jsondict)
if "MedicinalProductIndication" == resource_type:
from . import medicinalproductindication
return medicinalproductindication.MedicinalProductIndication(jsondict)
if "MedicinalProductIndicationOtherTherapy" == resource_type:
from . import medicinalproductindication
return medicinalproductindication.MedicinalProductIndicationOtherTherapy(jsondict)
if "MedicinalProductIngredient" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredient(jsondict)
if "MedicinalProductIngredientSubstance" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSubstance(jsondict)
if "MedicinalProductIngredientSpecifiedSubstance" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSpecifiedSubstance(jsondict)
if "MedicinalProductIngredientSpecifiedSubstanceStrength" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSpecifiedSubstanceStrength(jsondict)
if "MedicinalProductIngredientSpecifiedSubstanceStrengthReferenceStrength" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSpecifiedSubstanceStrengthReferenceStrength(jsondict)
if "MedicinalProductInteraction" == resource_type:
from . import medicinalproductinteraction
return medicinalproductinteraction.MedicinalProductInteraction(jsondict)
if "MedicinalProductInteractionInteractant" == resource_type:
from . import medicinalproductinteraction
return medicinalproductinteraction.MedicinalProductInteractionInteractant(jsondict)
if "MedicinalProductManufactured" == resource_type:
from . import medicinalproductmanufactured
return medicinalproductmanufactured.MedicinalProductManufactured(jsondict)
if "MedicinalProductPackaged" == resource_type:
from . import medicinalproductpackaged
return medicinalproductpackaged.MedicinalProductPackaged(jsondict)
if "MedicinalProductPackagedPackageItem" == resource_type:
from . import medicinalproductpackaged
return medicinalproductpackaged.MedicinalProductPackagedPackageItem(jsondict)
if "MedicinalProductPackagedBatchIdentifier" == resource_type:
from . import medicinalproductpackaged
return medicinalproductpackaged.MedicinalProductPackagedBatchIdentifier(jsondict)
if "MedicinalProductPharmaceutical" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceutical(jsondict)
if "MedicinalProductPharmaceuticalRouteOfAdministration" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalRouteOfAdministration(jsondict)
if "MedicinalProductPharmaceuticalCharacteristics" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalCharacteristics(jsondict)
if "MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpecies" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpecies(jsondict)
if "MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpeciesWithdrawalPeriod" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpeciesWithdrawalPeriod(jsondict)
if "MedicinalProductUndesirableEffect" == resource_type:
from . import medicinalproductundesirableeffect
return medicinalproductundesirableeffect.MedicinalProductUndesirableEffect(jsondict)
if "MessageDefinition" == resource_type:
from . import messagedefinition
return messagedefinition.MessageDefinition(jsondict)
if "MessageDefinitionAllowedResponse" == resource_type:
from . import messagedefinition
return messagedefinition.MessageDefinitionAllowedResponse(jsondict)
if "MessageDefinitionFocus" == resource_type:
from . import messagedefinition
return messagedefinition.MessageDefinitionFocus(jsondict)
if "MessageHeader" == resource_type:
from . import messageheader
return messageheader.MessageHeader(jsondict)
if "MessageHeaderResponse" == resource_type:
from . import messageheader
return messageheader.MessageHeaderResponse(jsondict)
if "MessageHeaderSource" == resource_type:
from . import messageheader
return messageheader.MessageHeaderSource(jsondict)
if "MessageHeaderDestination" == resource_type:
from . import messageheader
return messageheader.MessageHeaderDestination(jsondict)
if "MolecularSequence" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequence(jsondict)
if "MolecularSequenceStructureVariant" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceStructureVariant(jsondict)
if "MolecularSequenceRepository" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceRepository(jsondict)
if "MolecularSequenceQuality" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceQuality(jsondict)
if "MolecularSequenceVariant" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceVariant(jsondict)
if "MolecularSequenceReferenceSeq" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceReferenceSeq(jsondict)
if "MolecularSequenceQualityRoc" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceQualityRoc(jsondict)
if "MolecularSequenceStructureVariantInner" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceStructureVariantInner(jsondict)
if "MolecularSequenceStructureVariantOuter" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceStructureVariantOuter(jsondict)
if "NamingSystem" == resource_type:
from . import namingsystem
return namingsystem.NamingSystem(jsondict)
if "NamingSystemUniqueId" == resource_type:
from . import namingsystem
return namingsystem.NamingSystemUniqueId(jsondict)
if "NutritionOrder" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrder(jsondict)
if "NutritionOrderEnteralFormula" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderEnteralFormula(jsondict)
if "NutritionOrderSupplement" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderSupplement(jsondict)
if "NutritionOrderOralDiet" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderOralDiet(jsondict)
if "NutritionOrderOralDietTexture" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderOralDietTexture(jsondict)
if "NutritionOrderOralDietNutrient" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderOralDietNutrient(jsondict)
if "NutritionOrderEnteralFormulaAdministration" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderEnteralFormulaAdministration(jsondict)
if "Observation" == resource_type:
from . import observation
return observation.Observation(jsondict)
if "ObservationComponent" == resource_type:
from . import observation
return observation.ObservationComponent(jsondict)
if "ObservationReferenceRange" == resource_type:
from . import observation
return observation.ObservationReferenceRange(jsondict)
if "ObservationDefinition" == resource_type:
from . import observationdefinition
return observationdefinition.ObservationDefinition(jsondict)
if "ObservationDefinitionQualifiedInterval" == resource_type:
from . import observationdefinition
return observationdefinition.ObservationDefinitionQualifiedInterval(jsondict)
if "ObservationDefinitionQuantitativeDetails" == resource_type:
from . import observationdefinition
return observationdefinition.ObservationDefinitionQuantitativeDetails(jsondict)
if "OperationDefinition" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinition(jsondict)
if "OperationDefinitionOverload" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionOverload(jsondict)
if "OperationDefinitionParameter" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionParameter(jsondict)
if "OperationDefinitionParameterReferencedFrom" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionParameterReferencedFrom(jsondict)
if "OperationDefinitionParameterBinding" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionParameterBinding(jsondict)
if "OperationOutcome" == resource_type:
from . import operationoutcome
return operationoutcome.OperationOutcome(jsondict)
if "OperationOutcomeIssue" == resource_type:
from . import operationoutcome
return operationoutcome.OperationOutcomeIssue(jsondict)
if "Organization" == resource_type:
from . import organization
return organization.Organization(jsondict)
if "OrganizationContact" == resource_type:
from . import organization
return organization.OrganizationContact(jsondict)
if "OrganizationAffiliation" == resource_type:
from . import organizationaffiliation
return organizationaffiliation.OrganizationAffiliation(jsondict)
if "Parameters" == resource_type:
from . import parameters
return parameters.Parameters(jsondict)
if "ParametersParameter" == resource_type:
from . import parameters
return parameters.ParametersParameter(jsondict)
if "Patient" == resource_type:
from . import patient
return patient.Patient(jsondict)
if "PatientLink" == resource_type:
from . import patient
return patient.PatientLink(jsondict)
if "PatientCommunication" == resource_type:
from . import patient
return patient.PatientCommunication(jsondict)
if "PatientContact" == resource_type:
from . import patient
return patient.PatientContact(jsondict)
if "PaymentNotice" == resource_type:
from . import paymentnotice
return paymentnotice.PaymentNotice(jsondict)
if "PaymentReconciliation" == resource_type:
from . import paymentreconciliation
return paymentreconciliation.PaymentReconciliation(jsondict)
if "PaymentReconciliationProcessNote" == resource_type:
from . import paymentreconciliation
return paymentreconciliation.PaymentReconciliationProcessNote(jsondict)
if "PaymentReconciliationDetail" == resource_type:
from . import paymentreconciliation
return paymentreconciliation.PaymentReconciliationDetail(jsondict)
if "Person" == resource_type:
from . import person
return person.Person(jsondict)
if "PersonLink" == resource_type:
from . import person
return person.PersonLink(jsondict)
if "PlanDefinition" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinition(jsondict)
if "PlanDefinitionAction" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionAction(jsondict)
if "PlanDefinitionGoal" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionGoal(jsondict)
if "PlanDefinitionGoalTarget" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionGoalTarget(jsondict)
if "PlanDefinitionActionDynamicValue" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionDynamicValue(jsondict)
if "PlanDefinitionActionParticipant" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionParticipant(jsondict)
if "PlanDefinitionActionRelatedAction" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionRelatedAction(jsondict)
if "PlanDefinitionActionCondition" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionCondition(jsondict)
if "Practitioner" == resource_type:
from . import practitioner
return practitioner.Practitioner(jsondict)
if "PractitionerQualification" == resource_type:
from . import practitioner
return practitioner.PractitionerQualification(jsondict)
if "PractitionerRole" == resource_type:
from . import practitionerrole
return practitionerrole.PractitionerRole(jsondict)
if "PractitionerRoleNotAvailable" == resource_type:
from . import practitionerrole
return practitionerrole.PractitionerRoleNotAvailable(jsondict)
if "PractitionerRoleAvailableTime" == resource_type:
from . import practitionerrole
return practitionerrole.PractitionerRoleAvailableTime(jsondict)
if "Procedure" == resource_type:
from . import procedure
return procedure.Procedure(jsondict)
if "ProcedureFocalDevice" == resource_type:
from . import procedure
return procedure.ProcedureFocalDevice(jsondict)
if "ProcedurePerformer" == resource_type:
from . import procedure
return procedure.ProcedurePerformer(jsondict)
if "Provenance" == resource_type:
from . import provenance
return provenance.Provenance(jsondict)
if "ProvenanceEntity" == resource_type:
from . import provenance
return provenance.ProvenanceEntity(jsondict)
if "ProvenanceAgent" == resource_type:
from . import provenance
return provenance.ProvenanceAgent(jsondict)
if "Questionnaire" == resource_type:
from . import questionnaire
return questionnaire.Questionnaire(jsondict)
if "QuestionnaireItem" == resource_type:
from . | |
#!/usr/bin/env python
from binascii import hexlify, unhexlify
import time
import requests
import json
from collections import OrderedDict
import os
import sys
import random
from pprint import pprint
COIN = 1000000000000000000
TX_FEE = 0.01
rpcurl_mainnet = 'http://127.0.0.1:6602'
rpcurl_testnet = 'http://127.0.0.1:6604'
mainnet_genesis_privkey = '<KEY>'
mainnet_genesis_addr = '1231kgws0rhjtfewv57jegfe5bp4dncax60szxk8f4y546jsfkap3t5ws'
testnet_genesis_privkey = '141a6728ded4f83f767ea770e3582be497c5088fcc3b9ca248751887534f5197'
testnet_genesis_addr = '1549pyzf8dhx7r4x40k5j80f12btkpqfprjp134bcgcrjn963nzsx57xb'
password = '<PASSWORD>'
GENERATE_ADDR_MODE = 0
CREATE_NODE_MODE = 1
CHECK_MODE = 2
mode = GENERATE_ADDR_MODE
testnet = True
# RPC HTTP request
def call(body):
rpcurl = rpcurl_mainnet
if testnet:
rpcurl = rpcurl_testnet
req = requests.post(rpcurl, json=body)
if mode != GENERATE_ADDR_MODE:
print('DEBUG: request: {}'.format(body))
print('DEBUG: response: {}'.format(req.content))
resp = json.loads(req.content.decode('utf-8'))
return resp.get('result'), resp.get('error')
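# Illustrative only (the method shown is defined later in this script): a successful call such as
#   call({'id': 1, 'jsonrpc': '2.0', 'method': 'getforkheight', 'params': {}})
# returns (height, None), while a failed call returns (None, error_dict).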
def get_genesis_privkey():
if testnet:
return testnet_genesis_privkey
else:
return mainnet_genesis_privkey
def get_genesis_addr():
if testnet:
return testnet_genesis_addr
else:
return mainnet_genesis_addr
# RPC: makekeypair
def makekeypair():
result, error = call({
'id': 0,
'jsonrpc': '2.0',
'method': 'makekeypair',
'params': {}
})
if result:
pubkey = result.get('pubkey')
privkey = result.get('privkey')
# print('makekeypair success, pubkey: {}'.format(pubkey))
return pubkey, privkey
else:
raise Exception('makekeypair error: {}'.format(error))
# RPC: getnewkey
def getnewkey():
result, error = call({
'id': 0,
'jsonrpc': '2.0',
'method': 'getnewkey',
'params': {
'passphrase': password
}
})
if result:
pubkey = result
# print('getnewkey success, pubkey: {}'.format(pubkey))
return pubkey
else:
raise Exception('getnewkey error: {}'.format(error))
# RPC: getpubkeyaddress
def getpubkeyaddress(pubkey):
result, error = call({
'id': 0,
'jsonrpc': '2.0',
'method': 'getpubkeyaddress',
'params': {
"pubkey": pubkey
}
})
if result:
address = result
# print('getpubkeyaddress success, address: {}'.format(address))
return address
else:
raise Exception('getpubkeyaddress error: {}'.format(error))
# RPC: getaddresskey
def getaddresskey(address):
result, error = call({
'id': 0,
'jsonrpc': '2.0',
'method': 'getaddresskey',
'params': {
"address": address
}
})
if result:
return result
else:
raise Exception('getaddresskey error: {}'.format(error))
# RPC: importprivkey
def importprivkey(privkey):
result, error = call({
'id': 0,
'jsonrpc': '2.0',
'method': 'importprivkey',
'params': {
'privkey': privkey,
'passphrase': password
}
})
if result:
pubkey = result
# print('importprivkey success, pubkey: {}'.format(pubkey))
return pubkey
else:
raise Exception('importprivkey error: {}'.format(error))
# RPC: getbalance
def getbalance(addr, forkid=None):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getbalance',
'params': {
'address': addr,
'fork': forkid
}
})
if result:
if len(result) >= 1:
avail = result[0].get('avail')
# print('getbalance success, avail: {}'.format(avail))
return avail
else:
#raise Exception('getbalance result is 0, addr: {}'.format(addr))
print('getbalance result is 0, addr: {}'.format(addr))
return -1
else:
#raise Exception('getbalance fail, addr: {}'.format(addr))
print('getbalance fail, error: {}, addr: {}'.format(error, addr))
return -2
def getbalance_total(addr, forkid=None):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getbalance',
'params': {
'address': addr,
'fork': forkid
}
})
if result:
if len(result) >= 1:
avail = result[0].get('avail')
locked = result[0].get('locked')
# print('getbalance success, avail: {}'.format(avail))
return avail + locked
else:
#raise Exception('getbalance result is 0, addr: {}'.format(addr))
print('getbalance result is 0, addr: {}'.format(addr))
return -1
else:
#raise Exception('getbalance fail, addr: {}'.format(addr))
print('getbalance fail, error: {}, addr: {}'.format(error, addr))
return -2
def getbalance_locked(addr, forkid=None):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getbalance',
'params': {
'address': addr,
'fork': forkid
}
})
if result:
if len(result) >= 1:
avail = result[0].get('avail')
locked = result[0].get('locked')
return avail, locked
else:
print('getbalance result is 0, addr: {}'.format(addr))
return -1, 0
else:
print('getbalance fail, error: {}, addr: {}'.format(error, addr))
return -2, 0
# RPC: unlockkey
def unlockkey(key):
call({
'id': 1,
'jsonrpc': '2.0',
'method': 'unlockkey',
'params': {
'pubkey': key,
'passphrase': password
}
})
# RPC: sendfrom
def sendfrom(from_addr, to, amount, fork=None, data=None, contractcode=None, contractparam=None):
unlockkey(from_addr)
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'sendfrom',
'params': {
'from': from_addr,
'to': to,
'amount': str(amount),
'fork': fork,
'data': data,
'contractcode': contractcode,
'contractparam': contractparam
}
})
if result:
txid = result
return txid, 0
else:
print('sendfrom error, error: {}'.format(error))
return "", -1
# RPC: sendfrom_td
def sendfrom_td(from_addr, to, amount, td):
unlockkey(from_addr)
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'sendfrom',
'params': {
'from': from_addr,
'to': to,
'amount': str(amount),
'todata': td
}
})
if result:
txid = result
return txid, 0
else:
print('sendfrom error, error: {}'.format(error))
return "", -1
# RPC: createcontract
def createcontract(from_addr, to_addr, amount, fork, contractcode, contractparam):
unlockkey(from_addr)
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'sendfrom',
'params': {
'from': from_addr,
'to': to_addr,
'amount': str(amount),
'fork': fork,
'contractcode': contractcode,
'contractparam': contractparam
}
})
if result:
txid = result
return txid, 0
else:
print('createcontract sendfrom error, error: {}'.format(error))
return "", -1
# RPC: createmuxcontract
def createmuxcontract(from_addr, to_addr, amount, fork, fdata, contractparam):
unlockkey(from_addr)
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'sendfrom',
'params': {
'from': from_addr,
'to': to_addr,
'amount': str(amount),
'fork': fork,
'fdata': fdata,
'contractparam': contractparam
}
})
if result:
txid = result
return txid, 0
else:
print('createmuxcontract sendfrom error, error: {}'.format(error))
return "", -1
# RPC: makeorigin
def makeorigin(prev, owner, amount, name, symbol, reward, halvecycle):
unlockkey(owner)
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'makeorigin',
'params': {
'prev': prev,
'owner': owner,
'amount': str(amount),
'name': name,
'symbol': symbol,
'reward': str(reward),
'halvecycle': halvecycle
}
})
if result:
forkid = result.get('hash')
data = result.get('hex')
# print('makeorigin success, forkid: {}, data: {}'.format(forkid, data))
return forkid, data
else:
print(error)
raise Exception('makeorgin error: {}'.format(error))
# RPC: addnewtemplate fork
def addforktemplate(redeem, forkid):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'addnewtemplate',
'params': {
'type': 'fork',
'fork': {
'redeem': redeem,
'fork': forkid,
}
}
})
if result:
addr = result
return addr
else:
raise Exception('addforktemplate error: {}'.format(error))
# RPC: addnewtemplate delegate
def adddelegatetemplate(delegate, owner, rewardratio):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'addnewtemplate',
'params': {
'type': 'delegate',
'delegate': {
'delegate': delegate,
'owner': owner,
'rewardratio': rewardratio
}
}
})
if result:
addr = result
return addr
else:
raise Exception('adddelegatetemplate error: {}'.format(error))
# RPC: addnewtemplate vote
def addvotetemplate(delegate, owner, rewardmode):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'addnewtemplate',
'params': {
'type': 'vote',
'vote': {
'delegate': delegate,
'owner': owner,
'rewardmode': rewardmode
}
}
})
if result:
addr = result
return addr
else:
raise Exception('addvotetemplate error: {}'.format(error))
# RPC: maketemplate vote
def makevotetemplate(delegate, owner, rewardmode):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'maketemplate',
'params': {
'type': 'vote',
'vote': {
'delegate': delegate,
'owner': owner,
'rewardmode': rewardmode
}
}
})
if result:
addr = result.get('address')
hex = result.get('hex')
return addr,hex
else:
raise Exception('maketemplate error: {}'.format(error))
# RPC: removetemplate
def removetemplate(address):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'removetemplate',
'params': {
'address': address
}
})
if result:
return result
else:
#raise Exception('removetemplate fail, address: {}, error: {}'.format(address, error))
return "fail"
# RPC: getforkheight
def getforkheight(forkid=None):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getforkheight',
'params': {
'fork': forkid,
}
})
if result:
height = result
# print('getforkheight success, height: {}'.format(height))
return height
else:
return None
# RPC: getblockhash
def getblockhash(height, forkid=None):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getblockhash',
'params': {
'height': height,
'fork': forkid,
}
})
if result:
block_hash = result
# print('getblockhash success, block hash: {}'.format(block_hash))
return block_hash
else:
return None
# RPC: getblock
def getblock(blockid):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getblock',
'params': {
'block': blockid,
}
})
if result:
block = result
# print('getblock success, block: {}'.format(block))
return block
else:
raise Exception('getblock error: {}'.format(error))
# RPC: getblockdetail
def getblockdetail(blockid):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getblockdetail',
'params': {
'block': blockid,
}
})
if result:
block = result
# print('getblockdetail success, block: {}'.format(block))
return block
else:
raise Exception('getblockdetail error: {}'.format(error))
# RPC: gettransaction
def gettransaction(txid):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'gettransaction',
'params': {
'txid': txid,
}
})
if result:
tx = result['transaction']
# print('gettransaction success, tx: {}'.format(tx))
return tx
else:
raise Exception('gettransaction error: {}'.format(error))
# RPC: getgenealogy
def getgenealogy(forkid):
result, _ = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getgenealogy',
'params': {
'fork': forkid,
}
})
if result:
return True
else:
return False
# RPC: funcsign
def funcsign(funcname):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'funcsign',
'params': {
'funcname': funcname,
}
})
if result:
return result
else:
raise Exception('funcsign error: {}'.format(error))
# RPC: getpubkey
def getpubkey(address):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'getpubkey',
'params': {
'privkeyaddress': address,
}
})
if result:
return result
else:
raise Exception('getpubkey error: {}'.format(error))
# RPC: reversehex
def reversehex(value):
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'reversehex',
'params': {
'hex': value,
}
})
if result:
return result
else:
raise Exception('reversehex error: {}'.format(error))
# RPC: callcontract
def callcontract(from_addr, to, amount, fork=None, contractparam=None):
unlockkey(from_addr)
result, error = call({
'id': 1,
'jsonrpc': '2.0',
'method': 'callcontract',
'params': {
'from': from_addr,
'to': to,
'amount': str(amount),
'fork': fork,
'contractparam': contractparam
}
})
if | |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pandas as pd
import torch as to
from botorch.acquisition import PosteriorMean
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.optim import optimize_acqf
from gpytorch.constraints import GreaterThan
from gpytorch.mlls import ExactMarginalLogLikelihood
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
from typing import Sequence
import pyrado
from matplotlib import pyplot as plt
from pyrado.plotting.heatmap import draw_heatmap
from pyrado.utils.input_output import print_cbt
def render_singletask_gp(
ax: [plt.Axes, Axes3D, Sequence[plt.Axes]],
data_x: to.Tensor,
data_y: to.Tensor,
idcs_sel: list,
data_x_min: to.Tensor = None,
data_x_max: to.Tensor = None,
x_label: str = "",
y_label: str = "",
z_label: str = "",
min_gp_obsnoise: float = None,
resolution: int = 201,
num_stds: int = 2,
alpha: float = 0.3,
color: chr = None,
curve_label: str = "mean",
heatmap_cmap: colors.Colormap = None,
show_legend_posterior: bool = True,
show_legend_std: bool = False,
show_legend_data: bool = True,
legend_data_cmap: colors.Colormap = None,
colorbar_label: str = None,
title: str = None,
render3D: bool = True,
) -> plt.Figure:
"""
Fit the GP posterior to the input data and plot the mean and std as well as the data points.
There are 3 options (inferred from the data dimensions): a 1D curve plot, a 3D surface plot, or a 2D heat map.
.. note::
If you want to have a tight layout, it is best to pass axes of a figure with `tight_layout=True` or
`constrained_layout=True`.
:param ax: axis of the figure to plot on; only in the case of a 2-dim heat map plot, provide 2 axes
:param data_x: data to plot on the x-axis
:param data_y: data to process and plot on the y-axis
:param idcs_sel: selected indices of the input data
:param data_x_min: explicit minimum value for the evaluation grid, by default this value is extracted from `data_x`
:param data_x_max: explicit maximum value for the evaluation grid, by default this value is extracted from `data_x`
:param x_label: label for x-axis
:param y_label: label for y-axis
:param z_label: label for z-axis (3D plot only)
:param min_gp_obsnoise: set a minimal noise value (normalized) for the GP, if `None` the GP has no measurement noise
:param resolution: number of samples for the input (corresponds to x-axis resolution of the plot)
:param num_stds: number of standard deviations to plot around the mean
:param alpha: transparency (alpha-value) for the std area
:param color: color (e.g. 'k' for black), `None` invokes the default behavior
:param curve_label: label for the mean curve (1D plot only)
:param heatmap_cmap: color map forwarded to `draw_heatmap()` (2D plot only), `None` to use Pyrado's default
:param show_legend_posterior: flag if the legend entry for the posterior should be printed (affects mean and std)
:param show_legend_std: flag if a legend entry for the std area should be printed
:param show_legend_data: flag if a legend entry for the individual data points should be printed
:param legend_data_cmap: color map for the sampled points, default is 'binary'
:param colorbar_label: label for the color bar (2D plot only)
:param title: title displayed above the figure, set to `None` to suppress the title
:param render3D: use 3D rendering if possible
:return: handle to the resulting figure
"""
if data_x.ndim != 2:
raise pyrado.ShapeErr(msg="The GP's input data needs to be of shape num_samples x dim_input!")
data_x = data_x[:, idcs_sel] # forget the rest
dim_x = data_x.shape[1] # samples are along axis 0
if data_y.ndim != 2:
raise pyrado.ShapeErr(given=data_y, expected_match=to.Size([data_x.shape[0], 1]))
if legend_data_cmap is None:
legend_data_cmap = plt.get_cmap("binary")
# Project to normalized input and standardized output
if data_x_min is None or data_x_max is None:
data_x_min, data_x_max = to.min(data_x, dim=0)[0], to.max(data_x, dim=0)[0]
data_y_mean, data_y_std = to.mean(data_y, dim=0), to.std(data_y, dim=0)
data_x = (data_x - data_x_min) / (data_x_max - data_x_min)
data_y = (data_y - data_y_mean) / data_y_std
# Create and fit the GP model
gp = SingleTaskGP(data_x, data_y)
if min_gp_obsnoise is not None:
gp.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(min_gp_obsnoise))
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
mll.train()
fit_gpytorch_model(mll)
print_cbt("Fitted the SingleTaskGP.", "g")
argmax_pmean_norm, argmax_pmean_val_stdzed = optimize_acqf(
acq_function=PosteriorMean(gp),
bounds=to.stack([to.zeros(dim_x), to.ones(dim_x)]),
q=1,
num_restarts=500,
raw_samples=1000,
)
# Project back
argmax_posterior = argmax_pmean_norm * (data_x_max - data_x_min) + data_x_min
argmax_pmean_val = argmax_pmean_val_stdzed * data_y_std + data_y_mean
print_cbt(f"Converged to argmax of the posterior mean: {argmax_posterior.numpy()}", "g")
mll.eval()
gp.eval()
if dim_x == 1:
# Evaluation grid
x_grid = np.linspace(min(data_x), max(data_x), resolution, endpoint=True).flatten()
x_grid = to.from_numpy(x_grid)
# Mean and standard deviation of the surrogate model
posterior = gp.posterior(x_grid)
mean = posterior.mean.detach().flatten()
std = to.sqrt(posterior.variance.detach()).flatten()
# Project back from normalized input and standardized output
x_grid = x_grid * (data_x_max - data_x_min) + data_x_min
data_x = data_x * (data_x_max - data_x_min) + data_x_min
data_y = data_y * data_y_std + data_y_mean
mean = mean * data_y_std + data_y_mean
std *= data_y_std # double-checked with posterior.mvn.confidence_region()
# Plot the curve
plt.fill_between(
x_grid.numpy(),
mean.numpy() - num_stds * std.numpy(),
mean.numpy() + num_stds * std.numpy(),
alpha=alpha,
color=color,
)
ax.plot(x_grid.numpy(), mean.numpy(), color=color)
# Plot the queried data points
scat_plot = ax.scatter(
data_x.numpy().flatten(),
data_y.numpy().flatten(),
marker="o",
c=np.arange(data_x.shape[0], dtype=int),
cmap=legend_data_cmap,
)
if show_legend_data:
scat_legend = ax.legend(
*scat_plot.legend_elements(fmt="{x:.0f}"), # integer formatter
bbox_to_anchor=(0.0, 1.1, 1.0, -0.1),
title="query points",
ncol=data_x.shape[0],
loc="upper center",
mode="expand",
borderaxespad=0.0,
handletextpad=-0.5,
)
ax.add_artist(scat_legend)
# Increase vertical space between subplots when printing the data labels
# plt.tight_layout(pad=2.) # ignore argument
# plt.subplots_adjust(hspace=0.6)
# Plot the argmax of the posterior mean
# ax.scatter(argmax_posterior.item(), argmax_pmean_val, c='darkorange', marker='o', s=60, label='argmax')
ax.axvline(argmax_posterior.item(), c="darkorange", lw=1.5, label="argmax")
if show_legend_posterior:
ax.add_artist(ax.legend(loc="lower right"))
elif dim_x == 2:
# Create mesh grid matrices from x and y vectors
# x0_grid = to.linspace(min(data_x[:, 0]), max(data_x[:, 0]), resolution)
# x1_grid = to.linspace(min(data_x[:, 1]), max(data_x[:, 1]), resolution)
x0_grid = to.linspace(0, 1, resolution)
x1_grid = to.linspace(0, 1, resolution)
x0_mesh, x1_mesh = to.meshgrid([x0_grid, x1_grid])
x0_mesh, x1_mesh = x0_mesh.t(), x1_mesh.t() # transpose not necessary but makes identical mesh as np.meshgrid
# Mean and standard deviation of the surrogate model
x_test = to.stack([x0_mesh.reshape(resolution ** 2, 1), x1_mesh.reshape(resolution ** 2, 1)], -1).squeeze(1)
posterior = gp.posterior(x_test) # identical to gp.likelihood(gp(x_test))
mean = posterior.mean.detach().reshape(resolution, resolution)
std = to.sqrt(posterior.variance.detach()).reshape(resolution, resolution)
# Project back from normalized input and standardized output
data_x = data_x * (data_x_max - data_x_min) + data_x_min
data_y = data_y * data_y_std + data_y_mean
mean_raw = mean * data_y_std + data_y_mean
std_raw = std * data_y_std
if render3D:
# Project back from normalized input and standardized output (custom for 3D)
x0_mesh = x0_mesh * (data_x_max[0] - data_x_min[0]) + data_x_min[0]
x1_mesh = x1_mesh * (data_x_max[1] - data_x_min[1]) + data_x_min[1]
lower = mean_raw - num_stds * std_raw
upper = mean_raw + num_stds * std_raw
# Plot a 2D surface in 3D
ax.plot_surface(x0_mesh.numpy(), x1_mesh.numpy(), mean_raw.numpy())
ax.plot_surface(x0_mesh.numpy(), x1_mesh.numpy(), lower.numpy(), color="r", alpha=alpha)
ax.plot_surface(x0_mesh.numpy(), x1_mesh.numpy(), upper.numpy(), color="r", alpha=alpha)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
# Plot the queried data points
| |
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='fimValid')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='fimValid', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='fimValid'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='fimValid', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class fimValid
class TDadosEstab(GeneratedsSuper):
"""Detalhamento das informações do estabelecimento/obra"""
subclass = None
superclass = None
def __init__(self, cnaePrep=None, aliqGilrat=None, infoCaepf=None, infoObra=None, infoTrab=None):
self.original_tagname_ = None
self.cnaePrep = cnaePrep
self.aliqGilrat = aliqGilrat
self.infoCaepf = infoCaepf
self.infoObra = infoObra
self.infoTrab = infoTrab
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TDadosEstab)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TDadosEstab.subclass:
return TDadosEstab.subclass(*args_, **kwargs_)
else:
return TDadosEstab(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cnaePrep(self): return self.cnaePrep
def set_cnaePrep(self, cnaePrep): self.cnaePrep = cnaePrep
def get_aliqGilrat(self): return self.aliqGilrat
def set_aliqGilrat(self, aliqGilrat): self.aliqGilrat = aliqGilrat
def get_infoCaepf(self): return self.infoCaepf
def set_infoCaepf(self, infoCaepf): self.infoCaepf = infoCaepf
def get_infoObra(self): return self.infoObra
def set_infoObra(self, infoObra): self.infoObra = infoObra
def get_infoTrab(self): return self.infoTrab
def set_infoTrab(self, infoTrab): self.infoTrab = infoTrab
def hasContent_(self):
if (
self.cnaePrep is not None or
self.aliqGilrat is not None or
self.infoCaepf is not None or
self.infoObra is not None or
self.infoTrab is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TDadosEstab', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TDadosEstab')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TDadosEstab')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TDadosEstab', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TDadosEstab'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TDadosEstab', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.cnaePrep is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scnaePrep>%s</%scnaePrep>%s' % (namespace_, self.gds_format_integer(self.cnaePrep, input_name='cnaePrep'), namespace_, eol_))
if self.aliqGilrat is not None:
self.aliqGilrat.export(outfile, level, namespace_, name_='aliqGilrat', pretty_print=pretty_print)
if self.infoCaepf is not None:
self.infoCaepf.export(outfile, level, namespace_, name_='infoCaepf', pretty_print=pretty_print)
if self.infoObra is not None:
self.infoObra.export(outfile, level, namespace_, name_='infoObra', pretty_print=pretty_print)
if self.infoTrab is not None:
self.infoTrab.export(outfile, level, namespace_, name_='infoTrab', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'cnaePrep':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'cnaePrep')
self.cnaePrep = ival_
elif nodeName_ == 'aliqGilrat':
obj_ = aliqGilrat.factory()
obj_.build(child_)
self.aliqGilrat = obj_
obj_.original_tagname_ = 'aliqGilrat'
elif nodeName_ == 'infoCaepf':
obj_ = infoCaepf.factory()
obj_.build(child_)
self.infoCaepf = obj_
obj_.original_tagname_ = 'infoCaepf'
elif nodeName_ == 'infoObra':
obj_ = infoObra.factory()
obj_.build(child_)
self.infoObra = obj_
obj_.original_tagname_ = 'infoObra'
elif nodeName_ == 'infoTrab':
obj_ = infoTrab.factory()
obj_.build(child_)
self.infoTrab = obj_
obj_.original_tagname_ = 'infoTrab'
# end class TDadosEstab
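# Hedged usage sketch (added; not part of the generated bindings). It shows how a
# TDadosEstab object is typically built from an XML element and exported again.
# The element name 'dadosEstab' and the lxml import are assumptions; Tag_pattern_,
# showIndent and friends are the module-level helpers defined elsewhere in this
# generated file.
def _tdadosestab_usage_example(xml_bytes):
    import sys
    from lxml import etree as etree_
    node = etree_.fromstring(xml_bytes)            # e.g. a <dadosEstab> element
    obj = TDadosEstab.factory()                    # honours any registered subclass
    obj.build(node)                                # fills cnaePrep, aliqGilrat, ...
    obj.export(sys.stdout, 0, name_='dadosEstab')  # serializes it back to XML
    return obj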
class cnaePrep(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cnaePrep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cnaePrep.subclass:
return cnaePrep.subclass(*args_, **kwargs_)
else:
return cnaePrep(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        # cnaePrep defines no child elements or attributes, so it never has content.
        return False
def export(self, outfile, level, namespace_='', name_='cnaePrep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('cnaePrep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cnaePrep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cnaePrep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cnaePrep'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='cnaePrep', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cnaePrep
class aliqGilrat(GeneratedsSuper):
"""Informações de Apuração da alíquota Gilrat do Estabelecimento"""
subclass = None
superclass = None
def __init__(self, aliqRat=None, fap=None, aliqRatAjust=None, procAdmJudRat=None, procAdmJudFap=None):
self.original_tagname_ = None
self.aliqRat = aliqRat
self.fap = fap
self.aliqRatAjust = aliqRatAjust
self.procAdmJudRat = procAdmJudRat
self.procAdmJudFap = procAdmJudFap
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, aliqGilrat)
if subclass is not None:
return subclass(*args_, **kwargs_)
if aliqGilrat.subclass:
return aliqGilrat.subclass(*args_, **kwargs_)
else:
return aliqGilrat(*args_, **kwargs_)
factory = staticmethod(factory)
def get_aliqRat(self): return self.aliqRat
def set_aliqRat(self, aliqRat): self.aliqRat = aliqRat
def get_fap(self): return self.fap
def set_fap(self, fap): self.fap = fap
def get_aliqRatAjust(self): return self.aliqRatAjust
def set_aliqRatAjust(self, aliqRatAjust): self.aliqRatAjust = aliqRatAjust
def get_procAdmJudRat(self): return self.procAdmJudRat
def set_procAdmJudRat(self, procAdmJudRat): self.procAdmJudRat = procAdmJudRat
def get_procAdmJudFap(self): return self.procAdmJudFap
def set_procAdmJudFap(self, procAdmJudFap): self.procAdmJudFap = procAdmJudFap
def hasContent_(self):
if (
self.aliqRat is not None or
self.fap is not None or
self.aliqRatAjust is not None or
self.procAdmJudRat is not None or
self.procAdmJudFap is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='aliqGilrat', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('aliqGilrat')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='aliqGilrat')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='aliqGilrat', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='aliqGilrat'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='aliqGilrat', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.aliqRat is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%saliqRat>%s</%saliqRat>%s' % (namespace_, self.gds_format_integer(self.aliqRat, input_name='aliqRat'), namespace_, eol_))
if self.fap is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfap>%s</%sfap>%s' % (namespace_, self.gds_format_float(self.fap, input_name='fap'), namespace_, eol_))
if self.aliqRatAjust is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%saliqRatAjust>%s</%saliqRatAjust>%s' % (namespace_, self.gds_format_float(self.aliqRatAjust, input_name='aliqRatAjust'), namespace_, eol_))
if self.procAdmJudRat is not None:
self.procAdmJudRat.export(outfile, level, namespace_, name_='procAdmJudRat', pretty_print=pretty_print)
if self.procAdmJudFap is not None:
self.procAdmJudFap.export(outfile, level, namespace_, name_='procAdmJudFap', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'aliqRat':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'aliqRat')
self.aliqRat = ival_
elif nodeName_ == 'fap':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'fap')
self.fap = fval_
elif nodeName_ == 'aliqRatAjust':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'aliqRatAjust')
self.aliqRatAjust = fval_
elif nodeName_ == 'procAdmJudRat':
obj_ = procAdmJudRat.factory()
obj_.build(child_)
self.procAdmJudRat = obj_
obj_.original_tagname_ = 'procAdmJudRat'
elif nodeName_ == 'procAdmJudFap':
obj_ = procAdmJudFap.factory()
obj_.build(child_)
self.procAdmJudFap = obj_
obj_.original_tagname_ = 'procAdmJudFap'
# end class aliqGilrat
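# Illustrative note (added): the exportChildren()/buildChildren() pair above
# round-trips an XML fragment of roughly this shape; tag names follow the fields,
# the values below are made up:
#
#   <aliqGilrat>
#     <aliqRat>2</aliqRat>
#     <fap>1.0</fap>
#     <aliqRatAjust>2.0</aliqRatAjust>
#     <procAdmJudRat>...</procAdmJudRat>
#   </aliqGilrat>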
class aliqRat(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, aliqRat)
if subclass is not None:
return subclass(*args_, **kwargs_)
if aliqRat.subclass:
return aliqRat.subclass(*args_, **kwargs_)
else:
return aliqRat(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        # aliqRat defines no child elements or attributes, so it never has content.
        return False
# Repo: JulyKikuAkita/PythonPrac
# File: cs15211/CreateMaximumNumber.py (GitHub stars: 1-10)
__source__ = 'https://leetcode.com/problems/create-maximum-number/#/description'
# https://github.com/kamyu104/LeetCode/blob/master/Python/create-maximum-number.py
# Time: O(k * (m + n + k)) ~ O(k * (m + n + k^2))
# Space: O(m + n + k^2)
#
# Description: Leetcode # 321. Create Maximum Number
#
# Given two arrays of length m and n with digits 0-9 representing two numbers.
# Create the maximum number of length k <= m + n from digits of the two.
# The relative order of the digits from the same array must be preserved.
# Return an array of the k digits. You should try to optimize your time
# and space complexity.
#
# Example 1:
# nums1 = [3, 4, 6, 5]
# nums2 = [9, 1, 2, 5, 8, 3]
# k = 5
# return [9, 8, 6, 5, 3]
#
# Example 2:
# nums1 = [6, 7]
# nums2 = [6, 0, 4]
# k = 5
# return [6, 7, 6, 0, 4]
#
# Example 3:
# nums1 = [3, 9]
# nums2 = [8, 9]
# k = 3
# return [9, 8, 9]
#
# Companies
# Google
# Related Topics
# Dynamic Programming Greedy
# Similar Questions
# Remove K Digits
#
import unittest
# DP + Greedy solution. (280ms)
class Solution(object):
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
def get_max_digits(nums, start, end, max_digits):
max_digits[end] = max_digit(nums, end)
for i in reversed(xrange(start, end)):
max_digits[i] = delete_digit(max_digits[i + 1])
def max_digit(nums, k):
drop = len(nums) - k
res = []
for num in nums:
while drop and res and res[-1] < num:
res.pop()
drop -= 1
res.append(num)
return res[:k]
def delete_digit(nums):
res = list(nums)
for i in xrange(len(res)):
if i == len(res) - 1 or res[i] < res[i + 1]:
res = res[:i] + res[i+1:]
break
return res
def merge(a, b):
return [max(a, b).pop(0) for _ in xrange(len(a)+len(b))]
m, n = len(nums1), len(nums2)
max_digits1, max_digits2 = [[] for _ in xrange(k + 1)], [[] for _ in xrange(k + 1)]
get_max_digits(nums1, max(0, k - n), min(k, m), max_digits1)
get_max_digits(nums2, max(0, k - m), min(k, n), max_digits2)
return max(merge(max_digits1[i], max_digits2[k-i]) \
for i in xrange(max(0, k - n), min(k, m) + 1))
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
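    def test_example1_from_description(self):
        # Added check (hedged): verifies the solution against Example 1 above.
        self.assertEqual([9, 8, 6, 5, 3],
                         Solution().maxNumber([3, 4, 6, 5], [9, 1, 2, 5, 8, 3], 5))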
if __name__ == '__main__':
unittest.main()
Java = '''
Thought:
https://www.hrwhisper.me/leetcode-create-maximum-number/
Many of the posts use the same algorithm. In short, we can first solve two simpler problems:
Create the maximum number from one array.
Create the maximum number from two arrays using all of their digits.
The algorithm is O((m+n)^3) in the worst case. It runs in 22 ms.
# 9ms 91.96%
public class Solution {
public int[] maxNumber(int[] nums1, int[] nums2, int k) {
int n = nums1.length;
int m = nums2.length;
int[] ans = new int[k];
for (int i = Math.max(0, k - m); i <= k && i <= n; ++i) {
int[] candidate = merge(maxArray(nums1, i), maxArray(nums2, k - i), k);
if (greater(candidate, 0, ans, 0)) ans = candidate;
}
return ans;
}
public boolean greater(int[] nums1, int i, int[] nums2, int j) {
while (i < nums1.length && j < nums2.length && nums1[i] == nums2[j]) {
i++;
j++;
}
return j == nums2.length || (i < nums1.length && nums1[i] > nums2[j]);
}
private int[] merge(int[] nums1, int[] nums2, int k) {
int[] ans = new int[k];
for (int i = 0, j = 0, r = 0; r < k; ++r)
ans[r] = greater(nums1, i, nums2, j) ? nums1[i++] : nums2[j++];
return ans;
}
public int[] maxArray(int[] nums, int k) {
int n = nums.length;
int[] ans = new int[k];
for (int i = 0, j = 0; i < n; i++) {
while(n - i + j > k && j > 0 && ans[j - 1] < nums[i]) j--;
if (j < k) ans[j++] = nums[i];
}
return ans;
}
}
The basic idea:
To create max number of length k from two arrays, you need to create max number of length i from array one
and max number of length k-i from array two, then combine them together.
After trying all possible i, you will get the max number created from two arrays.
Optimization:
Suppose nums1 = [3, 4, 6, 5], nums2 = [9, 1, 2, 5, 8, 3],
the maximum number you can create from nums1 is [6, 5] with length 2.
For nums2, it's [9, 8, 3] with length 3. Merging the two sequences,
we have [9, 8, 6, 5, 3], which is the max number we can create from two arrays without length constraint.
If the required length is k <= 5, we can simply take the first k digits of the result.
For instance, if k = 3, then [9, 8, 6] is the result.
Suppose we need to create max number with length 2 from num = [4, 5, 3, 2, 1, 6, 0, 8].
The simple way is to use a stack, first we push 4 and have stack [4], then comes 5 > 4,
we pop 4 and push 5, stack becomes [5], 3 < 5, we push 3, stack becomes [5, 3].
Now we have the required length 2, but we need to keep going through the array in case a larger number comes, 2 < 3,
we discard it instead of pushing it because the stack already grows to required size 2. 1 < 3, we discard it. 6 > 3,
we pop 3, since 6 > 5 and there are still elements left, we can continue to pop 5 and push 6, the stack becomes [6],
since 0 < 6, we push 0, the stack becomes [6, 0], the stack grows to required length again. Since 8 > 0, we pop 0,
although 8 > 6, we can't continue to pop 6 since there is only one number, which is 8, left, if we pop 6 and push 8,
we can't get to length 2, so we push 8 directly, the stack becomes [6, 8].
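(Added note, not part of the original post: the nested helper max_digit in the Python
solution above performs this same greedy selection; for nums = [4, 5, 3, 2, 1, 6, 0, 8]
and k = 2 it also yields [6, 8].)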
In the basic idea, we mentioned trying all possible lengths i.
If we create max number for different i from scratch each time,
that would be a waste of time. Suppose num = [4, 9, 3, 2, 1, 8, 7, 6],
we need to create max number with length from 1 to 8. For i==8, result is the original array.
For i==7, we need to drop 1 number from array, since 9 > 4, we drop 4, the result is [9, 3, 2, 1, 8, 7, 6].
For i==6, we need to drop 1 more number, 3 < 9, skip, 2 < 3, skip, 1 < 2, skip, 8 > 1, we drop 1,
the result is [9, 3, 2, 8, 7, 6]. For i==5, we need to drop 1 more, but this time, we needn't check from the beginning,
during last scan, we already know [9, 3, 2] is monotonically non-increasing, so we check 8 directly, since 8 > 2,
we drop 2, the result is [9, 3, 8, 7, 6]. For i==4, we start with 8, 8 > 3, we drop 3, the result is [9, 8, 7, 6].
For i==3, we start with 8, 8 < 9, skip, 7 < 8, skip, 6 < 7, skip, by now,
we've got maximum number we can create from num without length constraint.
So from now on, we can drop a number from the end each time.
The result is [9, 8, 7], For i==2, we drop last number 7 and have [9, 8]. For i==1, we drop last number 8 and have [9].
Input:
[2,5,6,4,4,0]
[7,3,8,0,6,5,7,6,2]
15
Output:
[7,3,8,2,5,6,4,4,0,0,6,5,7,6,2]
Expected:
[7,3,8,2,5,6,4,4,0,6,5,7,6,2,0]
# 7ms 99.04%
class Solution {
public int[] maxNumber(int[] nums1, int[] nums2, int k) {
int[] result = new int[k];
Arrays.fill(result, Integer.MIN_VALUE);
for (int i = Math.max(0, k - nums2.length); i <= Math.min(nums1.length, k); i++) {
int[] cur1 = maxKNumbers(nums1, i);
int[] cur2 = maxKNumbers(nums2, k - i);
int[] cur = merge(cur1, cur2);
if (compare(result, 0, cur, 0) < 0) {
result = cur;
}
}
return result;
}
private int[] maxKNumbers(int[] nums, int k) {
int[] result = new int[k];
int end = -1;
for (int i = 0; i < nums.length; i++) {
            while (end >= 0 && result[end] < nums[i] && k - end
# -*- coding: utf-8 -*-
"""
Name : grdio.py
Created on : 2018/11/24 08:57
Author : <NAME> <<EMAIL>>
Affiliation : Institute of Geophysics, CEA.
Version : 0.1.0
Copyright : Copyright (C) 2018-2020 GEOIST Development Team. All Rights Reserved.
License : Distributed under the MIT License. See LICENSE.txt for more info.
Github : https://igp-gravity.github.io/
Description : Application for processing grid data of potential field datasets.
"""
import struct
import numpy as np
import scipy.interpolate as interp
from matplotlib import pyplot as plt
import warnings
def _check_area(area):
"""
Check that the area argument is valid.
For example, the west limit should not be greater than the east limit.
"""
x1, x2, y1, y2 = area
assert x1 <= x2, \
"Invalid area dimensions {}, {}. x1 must be < x2.".format(x1, x2)
assert y1 <= y2, \
"Invalid area dimensions {}, {}. y1 must be < y2.".format(y1, y2)
def regular(area, shape, z=None):
"""
Create a regular grid.
    The x direction is North-South and y is East-West. Imagine the grid as a
    matrix with x varying along the rows and y along the columns.
Returned arrays will be flattened to 1D with ``numpy.ravel``.
Parameters:
* area
``(x1, x2, y1, y2)``: Borders of the grid
* shape
Shape of the regular grid, ie ``(nx, ny)``.
* z
Optional. z coordinate of the grid points. If given, will return an
array with the value *z*.
Returns:
* ``[x, y]``
Numpy arrays with the x and y coordinates of the grid points
* ``[x, y, z]``
If *z* given. Numpy arrays with the x, y, and z coordinates of the grid
points
Examples:
>>> x, y = regular((0, 10, 0, 5), (5, 3))
>>> print(x)
[ 0. 0. 0. 2.5 2.5 2.5 5. 5. 5. 7.5 7.5 7.5
10. 10. 10. ]
>>> print(x.reshape((5, 3)))
[[ 0. 0. 0. ]
[ 2.5 2.5 2.5]
[ 5. 5. 5. ]
[ 7.5 7.5 7.5]
[ 10. 10. 10. ]]
"""
nx, ny = shape
x1, x2, y1, y2 = area
_check_area(area)
xs = np.linspace(x1, x2, nx)
ys = np.linspace(y1, y2, ny)
# Must pass ys, xs in this order because meshgrid uses the first argument
# for the columns
arrays = np.meshgrid(ys, xs)[::-1]
if z is not None:
        arrays.append(z*np.ones(nx*ny, dtype=float))  # np.float is removed in recent NumPy
return [i.ravel() for i in arrays]
def spacing(area, shape):
"""
Returns the spacing between grid nodes
Parameters:
* area
``(x1, x2, y1, y2)``: Borders of the grid
* shape
Shape of the regular grid, ie ``(nx, ny)``.
Returns:
* ``[dx, dy]``
        Spacing in the x and y directions
Examples:
>>> print(spacing((0, 10, 0, 20), (11, 11)))
[1.0, 2.0]
>>> print(spacing((0, 10, 0, 20), (11, 21)))
[1.0, 1.0]
>>> print(spacing((0, 10, 0, 20), (5, 21)))
[2.5, 1.0]
>>> print(spacing((0, 10, 0, 20), (21, 21)))
[0.5, 1.0]
"""
x1, x2, y1, y2 = area
nx, ny = shape
dx = (x2 - x1)/(nx - 1)
dy = (y2 - y1)/(ny - 1)
return [dx, dy]
class grddata(object):
"""
Grid Data Object
Attributes
----------
data : numpy masked array
array to contain raster data
xmin : float
min value X coordinate of raster grid
ymin : float
min value Y coordinate of raster grid
xdim : float
x-dimension of grid cell
ydim : float
y-dimension of grid cell
typeofdata : int
number of datatype
dataname : str
data name or id
rows : int
number of rows for each raster grid/band
cols : int
number of columns for each raster grid/band
nullvalue : float
grid null or nodata value
norm : dictionary
normalized data
gtr : tuple
projection information
wkt : str
projection information
units : str
description of units to be used with color bars
"""
def __init__(self):
self.data = np.ma.array([])
self.data0 = np.array([])
self.xmin = 0.0 # min value of X coordinate
self.ymin = 0.0 # min value of Y coordinate
self.xdim = 1.0
self.ydim = 1.0
self.dmin = 0.0
self.dmax = 0.0
self.typeofdata = 1 # 1- grav or 2- mag
self.dataname = '' #name of data
self.rows = -1
self.cols = -1
self.nullvalue = 1e+20
self.norm = {}
self.gtr = (0.0, 1.0, 0.0, 0.0, -1.0)
self.wkt = ''
self.units = ''
def fill_nulls(self, method='nearest'):
"""
Fill in the NaNs or masked values on interpolated points using nearest
neighbors.
method='nearest' or 'linear' or 'cubic'
"""
if np.ma.is_masked(self.data):
nans = self.data.mask
else:
nans = np.isnan(self.data)
nx,ny = nans.shape
ns = nans.reshape(nx*ny)
shape = (nx, ny)
xmax = self.xmin + (self.cols-1)*self.xdim
ymax = self.ymin + (self.rows-1)*self.ydim
area = (self.xmin, xmax, self.ymin, ymax)
x, y = regular(area, shape)
        dtmp = self.data.copy()  # copy the array so the original data is not modified
        dtmp1 = dtmp.reshape(nx*ny)
        ns1 = ~ns  # valid (non-null) cells
dtmp1[ns] = interp.griddata((x[ns1], y[ns1]), dtmp1[ns1], (x[ns], y[ns]),
method).ravel()
self.data0 = dtmp1.reshape(nx,ny)
def grd2xyz(self, flag = True):
"""
Return x,y,z 1-D array data from 2-D grid array.
Parameters:
            flag : True  - output the current grid data (self.data)
                   False - output the backup/filled grid data (self.data0)
Returns:
x,y,z 1-D array data
"""
nx,ny = self.data.shape
xmax = self.xmin + (self.cols-1)*self.xdim
ymax = self.ymin + (self.rows-1)*self.ydim
shape = (nx, ny)
area = (self.xmin, xmax, self.ymin, ymax)
x, y = regular(area, shape)
if flag:
z = self.data.reshape(nx*ny)
else:
z = self.data0.reshape(nx*ny)
return (x, y, z)
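    # Hedged usage sketch (added; not part of the original class). Typical flow for
    # filling masked cells and then flattening the grid; attribute names follow the
    # constructor above and the synthetic values are made up:
    #
    #   g = grddata()
    #   g.rows, g.cols = 5, 4
    #   g.xmin, g.ymin, g.xdim, g.ydim = 0.0, 0.0, 1.0, 1.0
    #   g.data = np.ma.masked_invalid(np.arange(20, dtype=float).reshape(5, 4))
    #   g.data[2, 2] = np.ma.masked
    #   g.fill_nulls(method='nearest')   # interpolated result lands in g.data0
    #   x, y, z = g.grd2xyz(flag=False)  # flattened coordinates/values of g.data0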
def load_grd(self,fname,*args,**kwargs):
with open(fname,'rb') as f:
tmp = f.read(4)
if tmp == b'DSAA':
self._load_surfer_ascii(fname,*args,**kwargs)
elif tmp == b'DSBB':
self._load_surfer_dsbb(fname,*args,**kwargs)
elif tmp == b'ncol':
self.load_ascii(fname,*args,**kwargs)
else:
raise ValueError("Unrecognized grd format.")
def load_surfer(self, fname, *args, **kwargs):
"""
Read data from a Surfer grid file.
Parameters:
* fname : str
Name of the Surfer grid file
* dtype : numpy dtype object or string
The type of variable used for the data. Default is numpy.float64 for
ascii data and is '=f' for binary data. Use numpy.float32 if the
data are large and precision is not an issue.
* header_format : header format (excluding the leading 'DSBB') following
the convention of the struct module. Only used for binary data.
Returns:
"""
with open(fname,'rb') as f:
tmp = f.read(4)
if tmp == b'DSAA':
self._load_surfer_ascii(fname,*args,**kwargs)
elif tmp == b'DSBB':
self._load_surfer_dsbb(fname,*args,**kwargs)
else:
                raise ValueError("Unknown header info {}. ".format(tmp)
                                 + "Only DSAA or DSBB can be recognized.")
def _load_surfer_dsbb(self,fname,dtype='=f',header_format='cccchhdddddd'):
"""
Read data from a Surfer DSBB grid file.
Parameters:
* fname : str
Name of the Surfer grid file
* dtype : numpy dtype object or string
The type of variable used for the data. Default is numpy.float64. Use
numpy.float32 if the data are large and precision is not an issue.
* header_format : header format following the convention of the
struct module.
Returns:
"""
with open(fname,'rb') as f:
# read header
header_len = struct.calcsize(header_format)
header = f.read(header_len)
# read data
data = b''
for x in f:
data += x
# unpack header
s = struct.Struct(header_format)
(tmp,tmp,tmp,tmp,self.cols,self.rows,self.xmin,self.xmax,
self.ymin,self.ymax,self.dmin,self.dmax) = s.unpack(header)
if self.cols<=0 and self.rows<=0:
            raise ValueError("Array shape can't be inferred.")
# convert data to numpy array
self.data = np.frombuffer(data,dtype=dtype).reshape(self.cols,self.rows)
self.data = np.ma.MaskedArray(self.data)
self.cols,self.rows = self.data.shape
if self.data.min()+1<self.dmin or self.data.max()-1>self.dmax:
warnings.warn("(min(z),max(z)) in the data is incompatible "
+"with (zmin,zmax) in the header. "
+"Please check whether the 'dtype' argument is "
+"correct.(default is '=f')")
self.xdim = (self.xmax-self.xmin)/(self.rows-1)
self.ydim = (self.ymax-self.ymin)/(self.cols-1)
def _load_surfer_ascii(self, fname, dtype='float64'):
"""
Read data from a Surfer ASCII grid file.
Parameters:
* fname : str
Name of the Surfer grid file
* dtype : numpy dtype object or string
The type of variable used for the data. Default is numpy.float64. Use
numpy.float32 if the data are large and precision is not an issue.
Returns:
"""
# Surfer ASCII grid structure
# DSAA Surfer ASCII GRD ID
# nCols nRows number of columns and rows
# xMin xMax X min max
# yMin yMax Y min max
# zMin zMax Z min max
# z11 z21 z31 ... List of Z values
with open(fname) as input_file:
# DSAA is a Surfer ASCII GRD ID (discard it for now)
input_file.readline()
# Read the number of columns (ny) and rows (nx)
            ny, nx = [int(s) for s in input_file.readline().split()]
# Repo: nueverest/BlowDryCSS
"""
Declares string for building blowdrycss - settings.py.
**Important:** Only called during initial installation.
"""
# python 2
from __future__ import absolute_import, print_function, unicode_literals
# builtins
import os
__author__ = '<NAME>'
__project__ = 'blowdrycss'
blowdrycss_settings_dot_py = """\"\"\"
**Usage Notes:**
The first time ``blowdrycss`` is run it auto-builds ``blowdrycss_settings.py`` via ``__init__.py``.
This makes it easy to find and customize related settings.
**Why such a long name? -- blowdrycss_settings.py**
Popular web frameworks such as django and flask already auto-generate a settings file called ``settings.py``.
The longer more specific name is used to prevent naming conflicts, and increase clarity.
**Parameters:**
| markdown_directory (*string*) -- Generally used for development purposes and github documentation.
| project_directory (*string*) -- Path to recursively search for all defined ``file_types``.
| css_directory (*string*) -- Path where the projects CSS files are located.
| docs_directory (*string*) -- Path where Sphinx docs are located (requires sphinx to be installed and run).
| output_file_name (*string*) -- Name of the generated output file contain DRY CSS definitions.
| output_extension (*string*) -- File extension of the generated output file. Must begin with '.'
| file_types = (*tuple of strings*) -- All file types/extensions to search for in the defined project_directory
that contain encoded class selectors.
| timing_enabled (*bool*) -- Run performance timer to see the performance of ``blowdrycss``.
| markdown_docs (*bool*) -- Generate a markdown files that provides a quick syntax and clashing alias reference.
Normally set to False except when posting to github.
| html_docs (*bool*) -- Generate a html file that provides a quick syntax and clashing alias reference.
| rst_docs (*bool*) -- Generate a sphinx rst file that provides a quick syntax and clashing alias reference.
| human_readable (*bool*) -- Generate a standard human readable css file. This file is named ``blowdry.css`` by
default.
| minify (*bool*) -- Generate a minified version of the css file. This file is named ``blowdry.min.css`` by default.
| media_queries_enabled (*bool*) -- Generate breakpoint and scaling media queries.
| use_em (*bool*) -- A ``pixels`` to ``em`` unit conversion flag. True enables unit conversion.
False disables unit conversions meaning any pixel value remains unchanged.
| base (*int*) -- Base used for unit conversion (typically set to 16). The pixel value will be divided by
``base`` during unit conversion.
| xxsmall (*tuple of floats*) -- (0px, upper limit in pixels)
| xsmall (*tuple of floats*) -- (xxsmall upper limit + 1px, upper limit in pixels)
| small (*tuple of floats*) -- (xsmall upper limit + 1px, upper limit in pixels)
| medium (*tuple of floats*) -- (small upper limit + 1px, upper limit in pixels)
| large (*tuple of floats*) -- (medium upper limit + 1px, upper limit in pixels)
| xlarge (*tuple of floats*) -- (large upper limit + 1px, upper limit in pixels)
| xxlarge (*tuple of floats*) -- (xlarge upper limit + 1px, upper limit in pixels)
| giant (*tuple of floats*) -- (xxlarge upper limit + 1px, upper limit in pixels)
| xgiant (*tuple of floats*) -- (giant upper limit + 1px, upper limit in pixels)
| xxgiant (*tuple of floats*) -- (xgiant upper limit + 1px, 1E+6) [Technically the upper limit is infinity,
but CSS does not permit it.]
**Custom Alias Syntax:**
| custom_property_alias_dict (*dict*) -- Contains customized shorthand encodings for a CSS property name.
e.g. ``'c-'`` is an alias for ``'color'``. This saves on typing.
| These encoded class selectors can be used inside of Web project files matching ``file_type``.
They can be customized to your liking.
| For more details about how to create custom aliases head on over to :doc:`advancedtopics`.
**cssutils Patch:**
``cssutils`` does not currently support all CSS 3 Units. The patch in this file allows length units of
``q``, ``ch``, ``rem``, ``vw``, ``vh``, ``vmin``, and ``vmax``. It also allows angle units of ``turn``.
\"\"\"
# python 2
from __future__ import absolute_import, division, unicode_literals
from builtins import round
# builtins
from os import getcwd, path
from string import digits
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
# plugins
from cssutils import profile
__project__ = 'blowdrycss'
# Set project_directory to the one containing the files you want to DRY out.
# Change these to whatever you want.
cwd = getcwd()
markdown_directory = path.join(cwd, 'docs', 'markdown')
project_directory = path.join(cwd, 'examplesite')
css_directory = path.join(project_directory, 'css')
docs_directory = path.join(cwd, 'docs')
# Logging
logging_enabled = False
logging_level = DEBUG # Allowed: DEBUG, INFO, WARNING, ERROR, CRITICAL
log_to_console = False
log_to_file = False
log_directory = path.join(cwd, 'log')
log_file_name = 'blowdrycss.log'
one_mega_byte = 1048576
log_file_size = 4 * one_mega_byte # Max log file size
log_backup_count = 1 # Maximum number of backup log files.
# Output File
output_file_name = 'blowdry'
output_extension = '.css' # Must begin with '.' Could be anything .scss, .less, etc.
# All file types/extensions to search for in the defined project_directory that contain encoded class selectors.
# Available formats:
# ('*.html', '*.js', '*.ts', '*.vue', '*.jinja', '*.jinja2', '*.jnj', '*.ja', '*.djt', '*.djhtml',
# '*.cs', '*.aspx', '*.ascx', '*.master', '*.erb', '*.php', )
file_types = ('*.html', )
# Timing
time_limit = 1800 # Frequency of a comprehensive run in seconds. See timing.LimitTimer() for details.
# Boolean Flags
auto_generate = False # Auto-generate blowdry.css when a file that matches file_types is saved. (Watchdog)
hide_css_errors = True # Hide errors and warnings generated by cssutils.
timing_enabled = True # Run performance timer
markdown_docs = False # Generate a markdown files that provides a quick syntax and clashing alias reference.
html_docs = True # Generate a html file that provides a quick syntax and clashing alias reference.
rst_docs = False # Generate a sphinx rst file that provides a quick syntax and clashing alias reference.
human_readable = True # Generate a standard human readable css file.
minify = True # Generate a minified version of the css file.
media_queries_enabled = True # Generate breakpoint and scaling media queries.
# ...Not Implemented Yet...
# use_hex = True # Using hex and browser performance: http://jsperf.com/css-color-names-vs-hex-codes/18
# extra_dry = False # Combine identical CSS discovered under different class selector names.
# http_server = False # Auto-Start a simple webserver on localhost:8080.
# public_url = False # Uses ngrok to generate a temporary public url for testings and demo purposes.
# condense_classes = False # Edits HTML Files after discovering common patterns (Not DRY do not implement).
# Unit Conversion Defaults
use_em = True
base = 16
def px_to_em(pixels):
\"\"\" Convert a numeric value from px to em using ``settings.base`` as the unit conversion factor.
**Rules:**
- ``pixels`` shall only contain [0-9.-].
- Inputs that contain any other value are simply passed through unchanged.
    - Default ``base`` is 16 meaning ``16px = 1em``
**Note:** Does not check the ``property_name`` or ``use_em`` values. Rather, it blindly converts
whatever input is provided. The calling method is expected to know what it is doing.
Rounds float to a maximum of 4 decimal places.
:type pixels: str, int, float
:param pixels: A numeric value with the units stripped.
:return: (str)
- If the input is convertible return the converted number as a string with the units ``em``
appended to the end.
- If the input is not convertible return the unprocessed input.
>>> from blowdrycss_settings import px_to_em
>>> # settings.use_em = True
>>> px_to_em(pixels='-16.0')
    -1.0em
>>> # settings.use_em = False
>>> px_to_em(pixels='42px')
42px
>>> # Invalid input passes through.
>>> px_to_em(pixels='invalid')
invalid
\"\"\"
if set(str(pixels)) <= set(digits + '-.'):
em = float(pixels) / float(base)
em = round(em, 4)
em = str(em) + 'em' # Add 'em'.
return em
return pixels
# Default Screen Breakpoints / Transition Triggers
# Tuple Format (Lower Limit, Upper Limit) in pixels.
# Note: These values change if unit conversion is enabled i.e. ``use_em`` is ``True``.
# Common Screen Resolutions: https://en.wikipedia.org/wiki/List_of_common_resolutions
xxsmall = (px_to_em(0), px_to_em(120)) # 0.0 - 7.5em
xsmall = (px_to_em(121), px_to_em(240)) # 7.5625 - 15.0em
small = (px_to_em(241), px_to_em(480)) # 15.0625 - 30.0em
medium = (px_to_em(481), px_to_em(720)) # 30.0625 - 45.0em # Typical mobile device break point @ 720px.
large = (px_to_em(721), px_to_em(1024)) # 45.0625 - 64.0em
xlarge = (px_to_em(1025), px_to_em(1366)) # 64.0625 - 85.375em
xxlarge = (px_to_em(1367), px_to_em(1920)) # 85.4375 - 120.0em
giant = (px_to_em(1921), px_to_em(2560)) # 120.0625 - 160.0em
xgiant = (px_to_em(2561), px_to_em(2800)) # 160.0625 - 175.0em
xxgiant = (px_to_em(2801), px_to_em(10**6)) # 175.0625 - float('inf')) # Python 2.x representation of Infinity.
# Custom CSS Property Syntax
custom_property_alias_dict = {
'background': {'bg-', },
'background-color': {'bgc-', 'bg-c-', 'bg-color-', },
'color': {'c-', },
'font-size': {'fsize-', 'f-size-', },
'font-weight': {'fweight-', 'f-weight-', },
'height': {'h-', },
'margin': {'m-', },
'margin-top': {'m-top-', },
'margin-bottom': {'m-bot-', },
    'padding': {'p-', 'pad-',
(InstructionTextTokenType.InstructionToken, 'movs.w'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As')
],
},
{
'opmask': (0xf409, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.w',
'width': 2,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'As', True, False, 2, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.w'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As'),
(InstructionTextTokenType.TextToken, '+')
],
},
{
'opmask': (0xf40d, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.w',
'width': 2,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'As+Is', True, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.w'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As+Is')
],
},
{
'opmask': (0xf402, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'As', True, False, -4, 0),
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, '@-'),
(InstructionTextTokenType.TextToken, 'As'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Ds')
],
},
{
'opmask': (0xf406, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'As', True, False, 0, 0),
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Ds')
],
},
{
'opmask': (0xf40a, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'As', True, False, 4, 0),
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As'),
(InstructionTextTokenType.TextToken, '+'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Ds')
],
},
{
'opmask': (0xf40e, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'As+Is', True, False, 0, 0),
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As+Is'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Ds')
],
},
{
'opmask': (0xf403, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'As', True, False, -4, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@-'),
(InstructionTextTokenType.TextToken, 'As')
],
},
{
'opmask': (0xf407, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'As', True, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As')
],
},
{
'opmask': (0xf40b, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'As', True, False, 4, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As'),
(InstructionTextTokenType.TextToken, '+')
],
},
{
'opmask': (0xf40f, 0xff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'movs.l',
'width': 4,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Ds', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'As+Is', True, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'movs.l'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Ds'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.TextToken, 'As+Is')
],
},
{
'opmask': (0xf8008800, 0xff00ff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'pabs',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'pabs'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf800a800, 0xff00fff0),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'pabs',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'pabs'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sy'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf800b100, 0xff00ff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'padd',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'padd'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Sy'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf800b200, 0xff00ff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'dct padd',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'dct padd'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Sy'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf800b300, 0xff00ff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'dcf padd',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'dcf padd'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Sy'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf800b000, 0xff00ff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'paddc',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'paddc'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Sy'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf8008d00, 0xff00fff0),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'pclr',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'pclr'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf8008e00, 0xff00fff0),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'dct pclr',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'dct pclr'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf8008f00, 0xff00fff0),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'dcf pclr',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'dcf pclr'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf8008400, 0xff00ff0f),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'pcmp',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'pcmp'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Sy')
],
},
{
'opmask': (0xf800d900, 0xff00ff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'pcopy',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sx', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'pcopy'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, 'Sx'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, 'Dz')
],
},
{
'opmask': (0xf800f900, 0xff00fff0),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'pcopy',
'width': 0,
'size': 4,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.UNKNOWN, 'Sy', False, False, 0, 0),
Oper(OpType.UNKNOWN, 'Dz', False, False, 0, 0)
],
'tokens': [
            (InstructionTextTokenType.InstructionToken, 'pcopy'),
import random
import time
import math
import copy
from generator import generator
from heuristic import evaluate
from rules import is_attacked, check_order
# The 'node' and 'cut' variables store, respectively,
# the number of nodes visited throughout the program
# and the number of times the tree was subjected to
# alpha-beta pruning.
node = 0
cut = 0
transposition_table = {}
first_search = {}
# The transposition table will function as a cache that holds
# previously evaluated positions' best move, which avoids
# spending resources on analysing all options from that point on.
class Hawkins:
def search(self, mx, player, depth, last_move, castling_chance):
"""
        Performs iteratively deepening Minimax searches
        while there is computation time left.
:param mx: board's state.
:param player: the color of AI's pieces.
:param depth: max search depth.
:param last_move: the last move played.
:param castling_chance: an array that holds
information on whether each player can castle.
"""
global transposition_table
global first_search
global node
global cut
if player == "Black":
maximize = True
else:
maximize = False
starting_point = time.time()
for level in range(1, depth + 1):
            # Iterative deepening is well suited to time constraints,
            # as it allows us to store the best move from previous iterations
            # and evaluate that same position first in the next one,
            # which makes the pruning even more aggressive.
search = Hawkins.minimax(self, mx, level, -1*10**5, 1*10**5, maximize, castling_chance, last_move)
best_move = search[1]
if search[0] == 10000:
return best_move
if time.time() - starting_point >= 10:
transposition_table = {}
return best_move
else:
first_search[mx] = best_move
# Storing the best move found in the transposition table,
                # in order to evaluate it first and hopefully discard
# other options sooner.
transposition_table = {}
return best_move
def minimax(self, mx, depth, alpha, beta, maximizing_player, castling_chance, last_move):
"""
A Minimax Search that makes use of
multiple alpha-beta pruning extensions,
neat move-ordering and many optimization techniques.
:param mx: board's state.
:param depth: max search depth.
:param alpha: alpha cutoff value.
:param beta: beta cutoff value.
:param maximizing_player: the evaluation function
returns positive values when the black pieces are
favored, and negative scores when the white pieces
take the advantage. With that in mind, depending on
the AI's pieces we can tell the search to maximize,
or to minimize each given decision (black to maximize,
white to minimize).
:param castling_chance: an array that holds
information on whether each player can castle.
:param last_move: the last move played.
"""
global node
global cut
global first_search
global transposition_table
white_pieces = {"P", "R", "K", "Q", "N", "B"}
black_pieces = {"p", "r", "k", "q", "n", "b"}
black_castling = [True if x != 0 else False for x in castling_chance][2:]
white_castling = [True if x != 0 else False for x in castling_chance][:2]
node += 1
# We should not waste resources analyzing
# a position previously evaluated. We must
# immediately return the best move recorded.
if mx in transposition_table.keys() and transposition_table[mx][3] >= depth:
if transposition_table[mx][2] == "Exact":
if alpha <= transposition_table[mx][0] <= beta:
return transposition_table[mx][:2]
if transposition_table[mx][2] == "Beta":
if transposition_table[mx][0] > beta:
return transposition_table[mx][:2]
if transposition_table[mx][2] == "Alpha":
if transposition_table[mx][0] < alpha:
return transposition_table[mx][:2]
if depth == 0:
return (evaluate(mx), mx)
if not maximizing_player:
# Checking if the 'White' castled in this play
# by looking at the position of its key pieces.
# Same goes for 'Black' after the 'else' statement.
if True in white_castling:
if mx[7*8+4] != "K":
white_castling = [False, False]
else:
if white_castling[0] == True and mx[7*8] != "R":
white_castling[0] = False
if white_castling[1] == True and mx[7*8 + 7]!= "R":
white_castling[1] = False
player, pieces, updated_castling = "White", white_pieces, white_castling
else:
if True in black_castling:
if mx[4] != "k":
black_castling = [False, False]
else:
if black_castling[0] == True and mx[0] != "r":
black_castling[0] = False
if black_castling[1] == True and mx[7]!= "r":
black_castling[1] = False
player, pieces, updated_castling = "Black", black_pieces, black_castling
# Generating all possible moves based
# on the player's pieces and permission to castle.
moves_generator = generator.possible_matrix(mx, player, tuple(pieces), last_move, tuple(updated_castling))
possible_states = moves_generator[0]
if len(possible_states) == 0:
if is_attacked(mx, player, tuple(pieces), last_move, False):
if player == "White":
return (10000, mx)
# Checkmate must be much more valuable than
# all the pieces' values combined.
else:
return (-10000, mx)
return (0, mx)
if mx in first_search.keys():
# At first, the only move stored in the transposition table
# is the best move found in the previous iteration.
# We want to evaluate it again at a deeper search
            # before any other play, as it is the most promising.
possible_states.insert(0, first_search[mx])
flag = ""
temp_alpha = alpha
if maximizing_player:
max_eval = -1*10**5
for state in possible_states:
node += 1
eval = Hawkins.minimax(self, state, depth-1, alpha, beta, False, castling_chance, last_move)
if eval[0] > max_eval:
max_eval = eval[0]
chosen = state
alpha = max(alpha, eval[0])
if beta <= alpha:
# Pruning
flag = "Beta"
cut += 1
break
if flag != "Beta":
if temp_alpha == alpha:
flag = "Alpha"
elif flag == "":
flag = "Exact"
transposition_table[mx] = (max_eval, chosen, flag, depth)
return (max_eval, chosen)
else:
min_eval = 1*10**5
for state in possible_states:
node += 1
eval = Hawkins.minimax(self, state, depth-1, alpha, beta, True, castling_chance, last_move)
if eval[0] < min_eval:
min_eval = eval[0]
chosen = state
beta = min(beta, eval[0])
if beta <= alpha:
# Pruning
flag = "Beta"
cut += 1
break
if flag != "Beta":
if temp_alpha == alpha:
flag = "Alpha"
elif flag == "":
flag = "Exact"
transposition_table[mx] = (min_eval, chosen, flag, depth)
return (min_eval, chosen)
class Tree:
def __init__(self, board):
"""
Tree constructor.
"""
self.board = board
self.visits = 0
self.score = 0
self.children = []
class Pluto:
def search(self, mx, player, last_move, castling_chance):
"""
Monte Carlo Tree Search algorithm, that uses
random rollouts and no previous knowledge of
the game to play.
:param mx: board's state.
:param player: the color of AI's pieces.
:param last_move: the last move played.
:param castling_chance: an array that holds
information on whether each player can castle.
"""
depth = 3
#Cutoff depth
starting_point = time.time()
root = Tree(mx)
while time.time() - starting_point <= 2:
leaf = Pluto.expand(self, root.board, player, root, last_move, castling_chance)
result = Pluto.rollout(self, player, leaf, last_move, castling_chance, depth)
Pluto.backpropagate(self, leaf, root, result)
return Pluto.best_child(self, root).board
def expand(self, mx, player, root, last_move, castling_chance):
"""
On this phase, we expand the tree by adding
to the root its child nodes and we select
one of those states to be explored.
:param mx: board's state
:param player: the color of AI's pieces.
:param root: root object.
:param last_move: the last move played.
:param castling_chance: an array that holds
information on whether each player can castle.
"""
white_pieces = {"P", "R", "K", "Q", "N", "B"}
black_pieces = {"p", "r", "k", "q", "n", "b"}
black_castling = [True if x != 0 else False for x in castling_chance][2:]
white_castling = [True if x != 0 else False for x in castling_chance][:2]
if player == "White":
pieces, updated_castling = white_pieces, white_castling
else:
pieces, updated_castling = black_pieces, black_castling
if len(root.children) == 0:
matrices = generator.possible_matrix(mx, player, tuple(pieces), last_move, tuple(updated_castling))[0]
root.children = [Tree(matrix) for matrix in matrices]
for child in root.children:
if child.visits == 0:
# We must visit the nodes that haven't
# been explored yet first.
return child
        # In case every single node has been chosen
        # at least once, we must choose the one
        # that seems to have the most potential.
return Pluto.expansion_choice(self, root)
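    # Added note (hedged): expansion_choice() is defined outside this excerpt. A
    # standard pick at this point is the UCT/UCB1 rule, i.e. the child maximizing
    #     child.score / child.visits + c * sqrt(log(parent.visits) / child.visits)
    # with an exploration constant c (sqrt(2) is a common default); the actual
    # implementation may differ.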
def rollout(self, player, leaf, last_move, castling_chance, depth):
"""
Random rollout phase.
:param player: color of AI's pieces.
:param leaf: child node.
:param last_move: the last move played.
:param castling chance: an array that holds
information on whether each player can castle.
:param depth: max_depth search.
"""
level = 0
mx = leaf.board
white_pieces = {"P", "R", "K", "Q", "N", "B"}
        black_pieces = {"p", "r", "k", "q", "n", "b"}
"""REST API view model serializers for the projectroles app"""
from email.utils import parseaddr
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import exceptions, serializers
from drf_keyed_list import KeyedListSerializer
from projectroles.models import (
Project,
Role,
RoleAssignment,
ProjectInvite,
SODAR_CONSTANTS,
)
from projectroles.utils import build_secret, get_expiry_date
from projectroles.views import (
ProjectModifyMixin,
RoleAssignmentModifyMixin,
ProjectInviteMixin,
)
# SODAR constants
PROJECT_TYPE_PROJECT = SODAR_CONSTANTS['PROJECT_TYPE_PROJECT']
PROJECT_TYPE_CATEGORY = SODAR_CONSTANTS['PROJECT_TYPE_CATEGORY']
PROJECT_ROLE_OWNER = SODAR_CONSTANTS['PROJECT_ROLE_OWNER']
PROJECT_ROLE_DELEGATE = SODAR_CONSTANTS['PROJECT_ROLE_DELEGATE']
PROJECT_ROLE_CONTRIBUTOR = SODAR_CONSTANTS['PROJECT_ROLE_CONTRIBUTOR']
PROJECT_ROLE_GUEST = SODAR_CONSTANTS['PROJECT_ROLE_GUEST']
SYSTEM_USER_GROUP = SODAR_CONSTANTS['SYSTEM_USER_GROUP']
# Local constants
REMOTE_MODIFY_MSG = (
'Modification of remote projects is not allowed, modify on '
'the SOURCE site instead'
)
User = get_user_model()
# Base Serializers -------------------------------------------------------------
class SODARModelSerializer(serializers.ModelSerializer):
"""Base serializer for any SODAR model with a sodar_uuid field"""
sodar_uuid = serializers.CharField(read_only=True)
class Meta:
pass
def to_representation(self, instance):
"""
Override to_representation() to ensure sodar_uuid is included for object
creation POST responses.
"""
ret = super().to_representation(instance)
if 'sodar_uuid' not in ret and 'sodar_uuid' in self.context:
ret['sodar_uuid'] = str(self.context['sodar_uuid'])
return ret
def save(self, **kwargs):
"""
Override save() to ensure sodar_uuid is included for object creation
POST responses.
"""
obj = super().save(**kwargs)
return self.post_save(obj)
def post_save(self, obj):
"""
Function to call at the end of a custom save() method. Ensures the
returning of sodar_uuid in object creation POST responses.
:param obj: Object created in save()
:return: obj
"""
if hasattr(obj, 'sodar_uuid'):
self.context['sodar_uuid'] = obj.sodar_uuid
return obj
class SODARProjectModelSerializer(SODARModelSerializer):
"""
Base serializer for SODAR models with a project relation.
The project field is read only because it is retrieved through the object
reference in the URL.
"""
project = serializers.SlugRelatedField(
slug_field='sodar_uuid', read_only=True
)
class Meta:
pass
def to_representation(self, instance):
"""
Override to_representation() to ensure the project value is included
in responses.
"""
ret = super().to_representation(instance)
if 'project' not in ret and 'project' in self.context:
ret['project'] = str(self.context['project'].sodar_uuid)
return ret
def create(self, validated_data):
"""Override create() to add project into validated data"""
if 'project' not in validated_data and 'project' in self.context:
validated_data['project'] = self.context['project']
return super().create(validated_data)
class SODARNestedListSerializer(SODARModelSerializer):
"""
Serializer to display nested SODAR models as dicts with sodar_uuid as key.
"""
class Meta:
list_serializer_class = KeyedListSerializer
keyed_list_serializer_field = 'sodar_uuid'
duplicate_list_key = True # Extension to drf-keyed-list
def to_representation(self, instance):
"""
Override to_representation() to pop project from a nested list
representation, where the project context is already known in the
topmost model.
"""
ret = super().to_representation(instance)
if self.context.get('project'):
ret.pop('project', None)
return ret
class SODARUserSerializer(SODARModelSerializer):
"""Serializer for the user model used in SODAR Core based sites"""
class Meta:
model = User
fields = ['username', 'name', 'email', 'sodar_uuid']
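# Illustrative sketch (field values assumed, not from this module): because
# keyed_list_serializer_field is 'sodar_uuid', a nested list such as
# ProjectSerializer's "roles" is rendered as a dict keyed by each assignment's
# sodar_uuid instead of a plain list, with the redundant "project" key popped:
#   "roles": {
#       "11111111-1111-...": {"role": "project owner", "user": {...}},
#       "22222222-2222-...": {"role": "project contributor", "user": {...}}
#   }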
# Projectroles Serializers -----------------------------------------------------
class RoleAssignmentValidateMixin:
"""Mixin for common role assignment validation"""
def validate(self, attrs):
project = self.context['project']
current_user = self.context['request'].user
del_limit = getattr(settings, 'PROJECTROLES_DELEGATE_LIMIT', 1)
# Validation for remote sites and projects
if project.is_remote():
raise serializers.ValidationError(REMOTE_MODIFY_MSG)
if 'role' not in attrs:
return attrs
# Do not allow modifying/inviting owner
if attrs['role'].name == PROJECT_ROLE_OWNER:
raise serializers.ValidationError('Modifying owner not allowed')
# Check delegate perms
if attrs[
'role'
].name == PROJECT_ROLE_DELEGATE and not current_user.has_perm(
'projectroles.update_project_delegate', project
):
raise exceptions.PermissionDenied(
'User lacks permission to assign delegates'
)
# Check delegate limit
if (
attrs['role'].name == PROJECT_ROLE_DELEGATE
and del_limit != 0
and project.get_delegates(exclude_inherited=True).count()
>= del_limit
):
raise serializers.ValidationError(
'Project delegate limit of {} has been reached'.format(
del_limit
)
)
return attrs
class RoleAssignmentSerializer(
RoleAssignmentModifyMixin,
RoleAssignmentValidateMixin,
SODARProjectModelSerializer,
):
"""Serializer for the RoleAssignment model"""
role = serializers.SlugRelatedField(
slug_field='name', queryset=Role.objects.all()
)
user = serializers.SlugRelatedField(
slug_field='sodar_uuid', queryset=User.objects.all()
)
class Meta:
model = RoleAssignment
fields = ['project', 'role', 'user', 'sodar_uuid']
def validate(self, attrs):
attrs = super().validate(attrs)
project = self.context['project']
# Do not allow updating user
if (
self.instance
and 'user' in attrs
and attrs['user'] != self.instance.user
):
raise serializers.ValidationError(
'Updating the user is not allowed, create a new role '
'assignment instead'
)
# Check for existing role if creating
if not self.instance:
old_as = RoleAssignment.objects.filter(
project=project, user=attrs['user']
).first()
if old_as:
raise serializers.ValidationError(
'User already has the role of "{}" in project '
'(UUID={})'.format(old_as.role.name, old_as.sodar_uuid)
)
# Add user to instance for PATCH requests
if self.instance and not attrs.get('user'):
attrs['user'] = self.instance.user
return attrs
def save(self, **kwargs):
"""Override save() to handle saving locally or through Taskflow"""
# NOTE: Role not updated in response data unless we set self.instance
# TODO: Figure out a clean fix
self.instance = self.post_save(
self.modify_assignment(
data=self.validated_data,
request=self.context['request'],
project=self.context['project'],
instance=self.instance,
)
)
return self.instance
class RoleAssignmentNestedListSerializer(
SODARNestedListSerializer, RoleAssignmentSerializer
):
"""Nested list serializer for the RoleAssignment model."""
user = SODARUserSerializer(read_only=True)
class Meta(SODARNestedListSerializer.Meta):
model = RoleAssignment
fields = ['role', 'user', 'sodar_uuid']
read_only_fields = ['role']
class ProjectInviteSerializer(
ProjectInviteMixin, RoleAssignmentValidateMixin, SODARProjectModelSerializer
):
"""Serializer for the ProjectInvite model"""
issuer = SODARUserSerializer(read_only=True)
role = serializers.SlugRelatedField(
slug_field='name', queryset=Role.objects.all()
)
class Meta:
model = ProjectInvite
fields = [
'email',
'project',
'role',
'issuer',
'date_created',
'date_expire',
'message',
'sodar_uuid',
]
read_only_fields = ['issuer', 'date_created', 'date_expire', 'active']
def validate(self, attrs):
attrs = super().validate(attrs)
# Validate email
if not parseaddr(attrs['email'])[1]:
raise serializers.ValidationError(
'Invalid email address "{}"'.format(attrs['email'])
)
# Check for existing user
user = User.objects.filter(email=attrs['email']).first()
if user:
raise serializers.ValidationError(
'User already exists in system with given email '
'"{}": {} ({})'.format(
attrs['email'], user.username, user.sodar_uuid
)
)
return attrs
def create(self, validated_data):
validated_data['issuer'] = self.context['request'].user
validated_data['date_expire'] = get_expiry_date()
validated_data['secret'] = build_secret()
return super().create(validated_data)
def save(self, **kwargs):
obj = super().save(**kwargs)
self.handle_invite(obj, self.context['request'], add_message=False)
return self.post_save(obj)
class ProjectSerializer(ProjectModifyMixin, SODARModelSerializer):
"""Serializer for the Project model"""
owner = serializers.CharField(write_only=True)
parent = serializers.SlugRelatedField(
slug_field='sodar_uuid',
many=False,
allow_null=True,
queryset=Project.objects.filter(type=PROJECT_TYPE_CATEGORY),
)
readme = serializers.CharField(required=False, allow_blank=True)
roles = RoleAssignmentNestedListSerializer(read_only=True, many=True)
class Meta:
model = Project
fields = [
'title',
'type',
'parent',
'description',
'readme',
'public_guest_access',
'submit_status',
'owner',
'roles',
'sodar_uuid',
]
read_only_fields = ['submit_status']
def validate(self, attrs):
site_mode = getattr(
settings,
'PROJECTROLES_SITE_MODE',
SODAR_CONSTANTS['SITE_MODE_SOURCE'],
)
target_create = getattr(settings, 'PROJECTROLES_TARGET_CREATE', True)
disable_categories = getattr(
settings, 'PROJECTROLES_DISABLE_CATEGORIES', False
)
current_user = self.context['request'].user
# Validation for remote sites and projects
if self.instance and self.instance.is_remote():
raise serializers.ValidationError(REMOTE_MODIFY_MSG)
elif (
not self.instance
and site_mode == SODAR_CONSTANTS['SITE_MODE_TARGET']
and not target_create
):
raise serializers.ValidationError(
'Creation of local projects not allowed on this target site'
)
# Validate parent
parent = attrs.get('parent')
# Attempting to move project under category without perms
if (
parent
and not current_user.is_superuser
and not current_user.has_perm('projectroles.create_project', parent)
and (not self.instance or self.instance.parent != parent)
):
raise exceptions.PermissionDenied(
'User lacks permission to place project under given category'
)
if parent and parent.type != PROJECT_TYPE_CATEGORY:
raise serializers.ValidationError('Parent is not a category')
elif (
'parent' in attrs
and not parent
and self.instance
and self.instance.parent
and not current_user.is_superuser
):
raise exceptions.PermissionDenied(
'Only superusers are allowed to place categories in root'
)
# Attempting to create/move project in root
if (
'parent' in attrs
and not parent
and attrs.get('type') == PROJECT_TYPE_PROJECT
and not disable_categories
):
raise serializers.ValidationError(
'Project must be placed under a category'
)
# Ensure we are not moving a category under one of its children
if (
parent
and self.instance
and self.instance.type == PROJECT_TYPE_CATEGORY
and parent in self.instance.get_children(flat=True)
):
raise serializers.ValidationError(
'Moving a category under its own child is not allowed'
)
# Validate type
if (
attrs.get('type')
and self.instance
and attrs['type'] != self.instance.type
):
raise serializers.ValidationError(
'Changing the project type is not allowed'
)
# Validate title
if parent and attrs.get('title') == parent.title:
raise serializers.ValidationError('Title can\'t match with parent')
if (
attrs.get('title')
and not self.instance
and Project.objects.filter(title=attrs['title'], parent=parent)
):
raise serializers.ValidationError(
'Title must be unique within parent'
)
# Validate type
if attrs.get('type') not in [
PROJECT_TYPE_CATEGORY,
PROJECT_TYPE_PROJECT,
None,
]: # None is ok for PATCH (will be updated in modify_project())
raise serializers.ValidationError(
'Type is not {} or {}'.format(
PROJECT_TYPE_CATEGORY, PROJECT_TYPE_PROJECT
)
)
# Validate and set owner
if attrs.get('owner'):
if (
self.partial
and attrs['owner'] != self.instance.get_owner().user.sodar_uuid
):
raise serializers.ValidationError(
'Modifying owner not allowed here, '
'use ownership transfer API view instead'
)
owner = User.objects.filter(sodar_uuid=attrs['owner']).first()
if not owner:
raise serializers.ValidationError('Owner not found')
attrs['owner'] = owner
# Set readme
if 'readme' in attrs and 'raw' in attrs['readme']:
attrs['readme'] = attrs['readme']['raw']
return attrs
def save(self, **kwargs):
"""Override save() to handle saving locally or through Taskflow"""
# NOTE: post_save() not needed here since we do an atomic model.save()
return self.modify_project(
data=self.validated_data,
request=self.context['request'],
instance=self.instance,
)
def to_representation(self, instance):
"""Override to make sure fields are correctly returned."""
ret = super().to_representation(instance)
parent = ret.get('parent')
project = Project.objects.get(
title=ret['title'],
**{'parent__sodar_uuid': parent}
# gfootball/env/wrappers.py
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment that can be used with OpenAI Baselines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import cv2
from functools import partial
from gfootball.env import observation_preprocessing
import gym
import numpy as np
class PeriodicDumpWriter(gym.Wrapper):
"""A wrapper that only dumps traces/videos periodically."""
def __init__(self, env, dump_frequency):
gym.Wrapper.__init__(self, env)
self._dump_frequency = dump_frequency
self._original_render = env._config['render']
self._original_dump_config = {
'write_video': env._config['write_video'],
'dump_full_episodes': env._config['dump_full_episodes'],
'dump_scores': env._config['dump_scores'],
}
self._current_episode_number = 0
def step(self, action):
return self.env.step(action)
def reset(self):
if (self._dump_frequency > 0 and
(self._current_episode_number % self._dump_frequency == 0)):
self.env._config.update(self._original_dump_config)
self.env._config.update({'render': True})
else:
self.env._config.update({'render': self._original_render,
'write_video': False,
'dump_full_episodes': False,
'dump_scores': False})
self._current_episode_number += 1
return self.env.reset()
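# Illustrative usage (parameters assumed, not from this file):
#   env = PeriodicDumpWriter(env, dump_frequency=50)
# re-enables rendering and the original dump settings on every 50th episode
# (episode numbers 0, 50, 100, ...) and disables video/score dumps otherwise.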
class Simple115StateWrapper(gym.ObservationWrapper):
"""A wrapper that converts an observation to 115-features state."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
shape = (self.env.unwrapped._config.number_of_players_agent_controls(), 115)
self.observation_space = gym.spaces.Box(
low=-1, high=1, shape=shape, dtype=np.float32)
def observation(self, observation):
"""Converts an observation into simple115 format.
Args:
observation: observation that the environment returns
Returns:
(N, 115) shaped representation, where N stands for the number of players
being controlled.
"""
final_obs = []
for obs in observation:
o = []
o.extend(obs['left_team'].flatten())
o.extend(obs['left_team_direction'].flatten())
o.extend(obs['right_team'].flatten())
o.extend(obs['right_team_direction'].flatten())
# If there were fewer than 11vs11 players we backfill missing values with
# -1.
# 88 = 11 (players) * 2 (teams) * 2 (positions & directions) * 2 (x & y)
if len(o) < 88:
o.extend([-1] * (88 - len(o)))
# ball position
o.extend(obs['ball'])
# ball direction
o.extend(obs['ball_direction'])
# one hot encoding of which team owns the ball
if obs['ball_owned_team'] == -1:
o.extend([1, 0, 0])
if obs['ball_owned_team'] == 0:
o.extend([0, 1, 0])
if obs['ball_owned_team'] == 1:
o.extend([0, 0, 1])
active = [0] * 11
if obs['active'] != -1:
active[obs['active']] = 1
o.extend(active)
game_mode = [0] * 7
game_mode[obs['game_mode']] = 1
o.extend(game_mode)
final_obs.append(o)
return np.array(final_obs, dtype=np.float32)
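# Feature-count check for the 115-dimensional vector assembled above:
#   88 (player positions + directions) + 3 (ball position) + 3 (ball direction)
#   + 3 (ball-ownership one-hot) + 11 (active-player one-hot)
#   + 7 (game-mode one-hot) = 115
# Minimal usage sketch (assumes an already-created gfootball env `env`):
#   env = Simple115StateWrapper(env)
#   obs = env.reset()   # shape: (number_of_players_agent_controls, 115)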
class PixelsStateWrapper(gym.ObservationWrapper):
"""A wrapper that extracts pixel representation."""
def __init__(self, env, grayscale=True,
channel_dimensions=(observation_preprocessing.SMM_WIDTH,
observation_preprocessing.SMM_HEIGHT)):
gym.ObservationWrapper.__init__(self, env)
self._grayscale = grayscale
self._channel_dimensions = channel_dimensions
self.observation_space = gym.spaces.Box(
low=0, high=255,
shape=(self.env.unwrapped._config.number_of_players_agent_controls(),
channel_dimensions[1], channel_dimensions[0],
1 if grayscale else 3),
dtype=np.uint8)
def observation(self, obs):
o = []
for observation in obs:
frame = observation['frame']
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self._channel_dimensions[0],
self._channel_dimensions[1]),
interpolation=cv2.INTER_AREA)
if self._grayscale:
frame = np.expand_dims(frame, -1)
o.append(frame)
return np.array(o, dtype=np.uint8)
class SMMWrapper(gym.ObservationWrapper):
"""A wrapper that converts an observation to a minimap."""
def __init__(self, env,
channel_dimensions=(observation_preprocessing.SMM_WIDTH,
observation_preprocessing.SMM_HEIGHT)):
gym.ObservationWrapper.__init__(self, env)
self._channel_dimensions = channel_dimensions
shape = (self.env.unwrapped._config.number_of_players_agent_controls(),
channel_dimensions[1], channel_dimensions[0],
len(observation_preprocessing.get_smm_layers(
self.env.unwrapped._config)))
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=shape, dtype=np.uint8)
def observation(self, obs):
return observation_preprocessing.generate_smm(
obs, channel_dimensions=self._channel_dimensions,
config=self.env.unwrapped._config)
class SingleAgentObservationWrapper(gym.ObservationWrapper):
"""A wrapper that returns an observation only for the first agent."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(
low=env.observation_space.low[0],
high=env.observation_space.high[0],
dtype=env.observation_space.dtype)
def observation(self, obs):
return obs[0]
class SingleAgentRewardWrapper(gym.RewardWrapper):
"""A wrapper that returns a reward only for the first agent."""
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
return reward[0]
class CheckpointRewardWrapper(gym.RewardWrapper):
"""A wrapper that adds a dense checkpoint reward."""
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self._collected_checkpoints = {True: 0, False: 0}
self._num_checkpoints = 10
self._checkpoint_reward = 0.1
def reset(self):
self._collected_checkpoints = {True: 0, False: 0}
return self.env.reset()
def reward(self, reward):
if self.env.unwrapped.last_observation is None:
return reward
assert len(reward) == len(self.env.unwrapped.last_observation)
for rew_index in range(len(reward)):
o = self.env.unwrapped.last_observation[rew_index]
is_left_to_right = o['is_left']
if reward[rew_index] == 1:
reward[rew_index] += self._checkpoint_reward * (
self._num_checkpoints -
self._collected_checkpoints[is_left_to_right])
self._collected_checkpoints[is_left_to_right] = self._num_checkpoints
continue
# Check if the active player has the ball.
if ('ball_owned_team' not in o or
o['ball_owned_team'] != (0 if is_left_to_right else 1) or
'ball_owned_player' not in o or
o['ball_owned_player'] != o['active']):
continue
if is_left_to_right:
d = ((o['ball'][0] - 1) ** 2 + o['ball'][1] ** 2) ** 0.5
else:
d = ((o['ball'][0] + 1) ** 2 + o['ball'][1] ** 2) ** 0.5
# Collect the checkpoints.
# We give reward for distance 1 to 0.2.
while (self._collected_checkpoints[is_left_to_right] <
self._num_checkpoints):
if self._num_checkpoints == 1:
threshold = 0.99 - 0.8
else:
threshold = (0.99 - 0.8 / (self._num_checkpoints - 1) *
self._collected_checkpoints[is_left_to_right])
if d > threshold:
break
reward[rew_index] += self._checkpoint_reward
self._collected_checkpoints[is_left_to_right] += 1
return reward
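# Worked example of the threshold schedule above with the default
# self._num_checkpoints == 10 (checkpoints indexed 0..9):
#   threshold_i = 0.99 - (0.8 / 9) * i  ->  0.99, 0.90, 0.81, ..., 0.28, 0.19
# Each time the ball-carrying team moves the ball within the next threshold of
# the opponent goal it collects one checkpoint worth self._checkpoint_reward
# (0.1); scoring immediately collects all remaining checkpoints.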
class FrameStack(gym.Wrapper):
"""Stack k last observations."""
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.obs = collections.deque([], maxlen=k)
low = env.observation_space.low
high = env.observation_space.high
low = np.concatenate([low] * k, axis=-1)
high = np.concatenate([high] * k, axis=-1)
self.observation_space = gym.spaces.Box(
low=low, high=high, dtype=env.observation_space.dtype)
def reset(self):
observation = self.env.reset()
self.obs.extend([observation] * self.obs.maxlen)
return self._get_observation()
def step(self, action):
observation, reward, done, info = self.env.step(action)
self.obs.append(observation)
return self._get_observation(), reward, done, info
def _get_observation(self):
return np.concatenate(list(self.obs), axis=-1)
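# Illustrative usage (shapes assumed, not from this file): low/high are
# concatenated along the last axis, so stacking k=4 frames turns a (..., C)
# observation into (..., 4*C):
#   env = FrameStack(SMMWrapper(env), k=4)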
class MAPOListStateWrapper(gym.ObservationWrapper):
"""A wrapper that converts an observation to 197-features state.
Each observation is converted to coordinates relative to the respective player's absolute position (ego-frame).
In addition, each observation is modified so as to respect partial observability constraints resulting
from:
- restricted view wedge (xy direction)
- depth noise
- view obstruction
"""
def __init__(self,
env,
po_view_cone_xy_opening=160,
po_player_width=0.060,
po_player_view_radius=-1,
po_depth_noise='default',
render_points=False,
full_obs_flag=False):
gym.ObservationWrapper.__init__(self, env)
self.po_view_cone_xy_opening = po_view_cone_xy_opening
self.po_player_width = po_player_width
# Fixed view radius almost never used.
self.po_player_view_radius = po_player_view_radius
self.po_depth_noise = {'type': 'gaussian', 'sigma': 0.1, 'attenuation_type': 'fixed_angular_resolution',
'angular_resolution_degrees': 0.2} \
if po_depth_noise == 'default' else po_depth_noise
self.number_of_players_controlled = self.env.unwrapped._config.number_of_players_agent_controls()
self.observation_space = gym.spaces.Box(
low=-1, high=1, shape=(self.number_of_players_controlled, 197), dtype=np.float32)
# Assign on first observation, right player detection doesn't work properly
self.n_left_players = 0
self.n_right_players = 0
self.player_view_directions = {}
self.render_points = render_points
self.full_obs = full_obs_flag
def _plot_points(self, obj_lists):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=self.number_of_players_controlled, ncols=6, figsize=(30, 2.1))
for player_id, obj_lst in enumerate(obj_lists):
# render scene in matplotlib
paxis = axes[player_id] if self.number_of_players_controlled > 1 else axes
# plot whether objects are set to visible for each agent
x, y, z = list(zip(*[obj.raw_obs['right_team'].tolist() + [0.0] for obj in obj_lst if
obj.label[:12] == 'right_player']))
paxis[0].scatter(x, y, color=(1.0, 0.8, 0.8))
paxis[1].scatter(x, z, color=(1.0, 0.8, 0.8))
paxis[2].scatter(y, z, color=(1.0, 0.8, 0.8))
u, v, w = list(zip(
*[obj.raw_obs['right_team_direction'].tolist() + [0.0] for obj in obj_lst if
obj.label[:12] == 'right_player']))
paxis[0].quiver(x, y, u, v, color=(1.0, 0.8, 0.8))
paxis[1].quiver(x, z, u, w, color=(1.0, 0.8, 0.8))
paxis[2].quiver(y, z, v, w, color=(1.0, 0.8, 0.8))
x, y, z = list(zip(
*[obj.raw_obs['left_team'].tolist() + [0.0] for obj in obj_lst if obj.label[:11] == 'left_player']))
paxis[0].scatter(x, y, color=(0.8, 1.0, 0.8))
paxis[1].scatter(x, z, color=(0.8, 1.0, 0.8))
paxis[2].scatter(y, z, color=(0.8, 1.0, 0.8))
u, v, w = list(zip(
*[obj.raw_obs['left_team_direction'].tolist() + [0.0] for obj in obj_lst if
obj.label[:11] == 'left_player']))
paxis[0].quiver(x, y, u, v, color=(0.8, 1.0, 0.8))
paxis[1].quiver(x, z, u, w, color=(0.8, 1.0, 0.8))
paxis[2].quiver(y, z, v, w, color=(0.8, 1.0, 0.8))
x, y, z = list(zip(*[obj.raw_obs['ball'].tolist() for obj in obj_lst if obj.type == 'ball']))
paxis[0].scatter(x, y, color=(0.8, 0.8, 1.0))
paxis[1].scatter(x, z, color=(0.8, 0.8, 1.0))
paxis[2].scatter(y, z, color=(0.8, 0.8, 1.0))
u, v, w = list(zip(
*[obj.raw_obs['ball_direction'].tolist() for obj in obj_lst if obj.type == 'ball']))
paxis[0].quiver(x, y, u, v, color=(0.8, 0.8, 1.0))
paxis[1].quiver(x, z, u, w, color=(0.8, 0.8, 1.0))
paxis[2].quiver(y, z, v, w, color=(0.8, 0.8, 1.0))
paxis[0].set_xlim(-(1 + 0.1), 1 + 0.1)
paxis[0].set_ylim(-(0.42 + 0.1), (0.42 + 0.1))
paxis[1].set_xlim(-(1 + 0.1), (1 + 0.1))
paxis[1].set_ylim(0 - 1, 10)
paxis[2].set_xlim(-(0.42 + 0.1), (0.42 + 0.1))
paxis[2].set_ylim(0 - 1, 10)
offset = 3
# plot local observations
x, y, z = list(zip(
*[(obj.location[0], obj.location[1], 0.0) for obj in obj_lst if obj.label[:12] == "right_player"]))
u, v, w = list(zip(
*[obj.attrs['view_direction'].tolist() for obj in obj_lst if obj.label[:12] == 'right_player']))
paxis[0 + offset].scatter(x, y, color=(1.0, 0.8, 0.8))
paxis[1 + offset].scatter(x, z, color=(1.0, 0.8, 0.8))
paxis[2 + offset].scatter(y, z, color=(1.0, 0.8, 0.8))
paxis[0 + offset].quiver(x, y, u, v, color=(1.0, 0.8, 0.8))
paxis[1 + offset].quiver(x, z, u, w, color=(1.0, 0.8, 0.8))
paxis[2 + offset].quiver(y, z, v, w, color=(1.0, 0.8, 0.8))
x, y, z = list(zip(
*[(obj.location[0], obj.location[1], 0.0) for obj in obj_lst
""" Cisco_IOS_XE_ios_events_oper
This module contains a collection of YANG definitions
for asynchronous events from network element.
Copyright (c) 2016\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class BgpPstate(Enum):
"""
BgpPstate (Enum Class)
BGP state
.. data:: bgp_state_idle = 0
.. data:: bgp_state_connect = 1
.. data:: bgp_state_active = 2
.. data:: bgp_state_opensent = 3
.. data:: bgp_state_openconfirm = 4
.. data:: bgp_state_established = 5
.. data:: bgp_state_clearing = 6
.. data:: bgp_state_deleted = 7
"""
bgp_state_idle = Enum.YLeaf(0, "bgp-state-idle")
bgp_state_connect = Enum.YLeaf(1, "bgp-state-connect")
bgp_state_active = Enum.YLeaf(2, "bgp-state-active")
bgp_state_opensent = Enum.YLeaf(3, "bgp-state-opensent")
bgp_state_openconfirm = Enum.YLeaf(4, "bgp-state-openconfirm")
bgp_state_established = Enum.YLeaf(5, "bgp-state-established")
bgp_state_clearing = Enum.YLeaf(6, "bgp-state-clearing")
bgp_state_deleted = Enum.YLeaf(7, "bgp-state-deleted")
class DhcpServerStateVal(Enum):
"""
DhcpServerStateVal (Enum Class)
DHCP Server state
.. data:: dhcp_server_state_up = 0
.. data:: dhcp_server_state_down = 1
"""
dhcp_server_state_up = Enum.YLeaf(0, "dhcp-server-state-up")
dhcp_server_state_down = Enum.YLeaf(1, "dhcp-server-state-down")
class FibUpdatesAfType(Enum):
"""
FibUpdatesAfType (Enum Class)
FIB updates AF type
.. data:: fib_updates_af_unknown = 0
.. data:: fib_updates_af_ipv4 = 1
.. data:: fib_updates_af_ipv6 = 2
"""
fib_updates_af_unknown = Enum.YLeaf(0, "fib-updates-af-unknown")
fib_updates_af_ipv4 = Enum.YLeaf(1, "fib-updates-af-ipv4")
fib_updates_af_ipv6 = Enum.YLeaf(2, "fib-updates-af-ipv6")
class HardwareSensorType(Enum):
"""
HardwareSensorType (Enum Class)
Hardware Sensor Type
.. data:: hw_sensor_board = 0
Hardware sensor board
.. data:: hw_sensor_cpu_junction = 1
Hardware sensor CPU junction
.. data:: hw_sensor_dram = 2
Hardware sensor DRAM
.. data:: hw_sensor_pim = 3
Hardware sensor PIM
"""
hw_sensor_board = Enum.YLeaf(0, "hw-sensor-board")
hw_sensor_cpu_junction = Enum.YLeaf(1, "hw-sensor-cpu-junction")
hw_sensor_dram = Enum.YLeaf(2, "hw-sensor-dram")
hw_sensor_pim = Enum.YLeaf(3, "hw-sensor-pim")
class InterfaceNotifState(Enum):
"""
InterfaceNotifState (Enum Class)
Interface Notification state
.. data:: interface_notif_state_up = 0
.. data:: interface_notif_state_down = 1
"""
interface_notif_state_up = Enum.YLeaf(0, "interface-notif-state-up")
interface_notif_state_down = Enum.YLeaf(1, "interface-notif-state-down")
class IntfAdminState(Enum):
"""
IntfAdminState (Enum Class)
Interface admin state
.. data:: up = 0
.. data:: down = 1
"""
up = Enum.YLeaf(0, "up")
down = Enum.YLeaf(1, "down")
class NotificationFailureState(Enum):
"""
NotificationFailureState (Enum Class)
Notification failure state
.. data:: notf_failure_state_ok = 0
Notification failure state ok
.. data:: notf_failure_state_failed = 1
Notification failure state failed
"""
notf_failure_state_ok = Enum.YLeaf(0, "notf-failure-state-ok")
notf_failure_state_failed = Enum.YLeaf(1, "notf-failure-state-failed")
class NotificationModuleState(Enum):
"""
NotificationModuleState (Enum Class)
Notification module state
.. data:: notf_module_state_inserted = 0
Notification module inserted state
.. data:: notf_module_state_removed = 1
Notification module removed state
"""
notf_module_state_inserted = Enum.YLeaf(0, "notf-module-state-inserted")
notf_module_state_removed = Enum.YLeaf(1, "notf-module-state-removed")
class NotificationSensorState(Enum):
"""
NotificationSensorState (Enum Class)
Notification sensor state
.. data:: sensor_state_green = 0
Sensor state green
.. data:: sensor_state_yellow = 1
Sensor state yellow
.. data:: sensor_state_red = 2
Sensor state red
"""
sensor_state_green = Enum.YLeaf(0, "sensor-state-green")
sensor_state_yellow = Enum.YLeaf(1, "sensor-state-yellow")
sensor_state_red = Enum.YLeaf(2, "sensor-state-red")
class NotificationSeverity(Enum):
"""
NotificationSeverity (Enum Class)
Notification severity
.. data:: critical = 0
.. data:: major = 1
.. data:: minor = 2
"""
critical = Enum.YLeaf(0, "critical")
major = Enum.YLeaf(1, "major")
minor = Enum.YLeaf(2, "minor")
class OspfIntfState(Enum):
"""
OspfIntfState (Enum Class)
OSPF interface states
.. data:: ospf_ifs_down = 0
.. data:: ospf_ifs_loopback = 1
.. data:: ospf_ifs_waiting = 2
.. data:: ospf_ifs_point_to_m_point = 3
.. data:: ospf_ifs_point_to_point = 4
.. data:: ospf_ifs_dr = 5
.. data:: ospf_ifs_backup = 6
.. data:: ospf_ifs_dr_other = 7
.. data:: ospf_ifs_depend_upon = 8
"""
ospf_ifs_down = Enum.YLeaf(0, "ospf-ifs-down")
ospf_ifs_loopback = Enum.YLeaf(1, "ospf-ifs-loopback")
ospf_ifs_waiting = Enum.YLeaf(2, "ospf-ifs-waiting")
ospf_ifs_point_to_m_point = Enum.YLeaf(3, "ospf-ifs-point-to-m-point")
ospf_ifs_point_to_point = Enum.YLeaf(4, "ospf-ifs-point-to-point")
ospf_ifs_dr = Enum.YLeaf(5, "ospf-ifs-dr")
ospf_ifs_backup = Enum.YLeaf(6, "ospf-ifs-backup")
ospf_ifs_dr_other = Enum.YLeaf(7, "ospf-ifs-dr-other")
ospf_ifs_depend_upon = Enum.YLeaf(8, "ospf-ifs-depend-upon")
class OspfNbrState(Enum):
"""
OspfNbrState (Enum Class)
OSPF neighbor states
.. data:: ospf_nbr_down = 0
.. data:: ospf_nbr_attempt = 1
.. data:: ospf_nbr_init = 2
.. data:: ospf_nbr_two_way = 3
.. data:: ospf_nbr_exstart = 4
.. data:: ospf_nbr_exchange = 5
.. data:: ospf_nbr_loading = 6
.. data:: ospf_nbr_full = 7
.. data:: ospf_nbr_deleted = 8
.. data:: ospf_nbr_depend_upon = 9
"""
ospf_nbr_down = Enum.YLeaf(0, "ospf-nbr-down")
ospf_nbr_attempt = Enum.YLeaf(1, "ospf-nbr-attempt")
ospf_nbr_init = Enum.YLeaf(2, "ospf-nbr-init")
ospf_nbr_two_way = Enum.YLeaf(3, "ospf-nbr-two-way")
ospf_nbr_exstart = Enum.YLeaf(4, "ospf-nbr-exstart")
ospf_nbr_exchange = Enum.YLeaf(5, "ospf-nbr-exchange")
ospf_nbr_loading = Enum.YLeaf(6, "ospf-nbr-loading")
ospf_nbr_full = Enum.YLeaf(7, "ospf-nbr-full")
ospf_nbr_deleted = Enum.YLeaf(8, "ospf-nbr-deleted")
ospf_nbr_depend_upon = Enum.YLeaf(9, "ospf-nbr-depend-upon")
class UtdIpsAlertActionVal(Enum):
"""
UtdIpsAlertActionVal (Enum Class)
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert action
.. data:: utd_ips_alert_action_unknown = 0
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert action is unknown
.. data:: utd_ips_alert_action_alert = 1
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert action generated an alert
.. data:: utd_ips_alert_action_drop = 2
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert action resulted in a drop
.. data:: utd_ips_alert_action_wdrop = 3
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert action would have resulted in a drop if running in IPS mode
"""
utd_ips_alert_action_unknown = Enum.YLeaf(0, "utd-ips-alert-action-unknown")
utd_ips_alert_action_alert = Enum.YLeaf(1, "utd-ips-alert-action-alert")
utd_ips_alert_action_drop = Enum.YLeaf(2, "utd-ips-alert-action-drop")
utd_ips_alert_action_wdrop = Enum.YLeaf(3, "utd-ips-alert-action-wdrop")
class UtdIpsAlertClassificationVal(Enum):
"""
UtdIpsAlertClassificationVal (Enum Class)
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification
.. data:: utd_ips_alert_classification_none = 0
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is not set
.. data:: utd_ips_alert_classification_not_suspicious = 1
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is not suspicious traffic
.. data:: utd_ips_alert_classification_unknown = 2
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is unknown traffic
.. data:: utd_ips_alert_classification_bad_unknown = 3
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is potentially bad traffic
.. data:: utd_ips_alert_classification_attempted_recon = 4
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is attempted information leak
.. data:: utd_ips_alert_classification_successful_recon_limited = 5
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is information leak
.. data:: utd_ips_alert_classification_successful_recon_largescale = 6
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is large scale information leak
.. data:: utd_ips_alert_classification_attempted_dos = 7
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is attempted denial of service
.. data:: utd_ips_alert_classification_successful_dos = 8
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is denial of service
.. data:: utd_ips_alert_classification_attempted_user = 9
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is attempted user privilege gain
.. data:: utd_ips_alert_classification_unsuccessful_user = 10
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is unsuccessful user privilege gain
.. data:: utd_ips_alert_classification_successful_user = 11
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is successful user privilege gain
.. data:: utd_ips_alert_classification_attempted_admin = 12
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is attempted administrator privilege gain
.. data:: utd_ips_alert_classification_successful_admin = 13
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is successful administrator privilege gain
.. data:: utd_ips_alert_classification_rpc_portmap_decode = 14
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is decode of an rpc query
.. data:: utd_ips_alert_classification_shellcode_detect = 15
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is executable code was detected
.. data:: utd_ips_alert_classification_string_detect = 16
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is a suspicious string was detected
.. data:: utd_ips_alert_classification_suspicious_filename_detect = 17
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is a suspicious filename was detected
.. data:: utd_ips_alert_classification_suspicious_login = 18
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is an attempted login using a suspicious username was detected
.. data:: utd_ips_alert_classification_system_call_detect = 19
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is a system call was detected
.. data:: utd_ips_alert_classification_tcp_connection = 20
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is a tcp connection was detected
.. data:: utd_ips_alert_classification_trojan_activity = 21
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is a network trojan was detected
.. data:: utd_ips_alert_classification_unusual_client_port_connection = 22
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is a client was using an unusual port
.. data:: utd_ips_alert_classification_network_scan = 23
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is detection of a network scan
.. data:: utd_ips_alert_classification_denial_of_service = 24
Unified Threat Defense (UTD) Intrusion Prevention System (IPS) alert classification is detection of a denial of service attack
import logging
from typing import List, Dict, Set, Union, cast, Type
import pandas as pd
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.model.NucleotideMutationTranslater import NucleotideMutationTranslater
from genomics_data_index.storage.model.QueryFeature import QueryFeature
from genomics_data_index.storage.model.QueryFeatureHGVS import QueryFeatureHGVS
from genomics_data_index.storage.model.QueryFeatureHGVSGN import QueryFeatureHGVSGN
from genomics_data_index.storage.model.QueryFeatureMLST import QueryFeatureMLST
from genomics_data_index.storage.model.QueryFeatureMutation import QueryFeatureMutation
from genomics_data_index.storage.model.QueryFeatureMutationSPDI import QueryFeatureMutationSPDI
from genomics_data_index.storage.model.db import NucleotideVariantsSamples, Reference, ReferenceSequence, MLSTScheme, \
SampleMLSTAlleles, MLSTAllelesSamples, Sample
from genomics_data_index.storage.model.db import SampleNucleotideVariation
from genomics_data_index.storage.service import DatabaseConnection
from genomics_data_index.storage.service import SQLQueryInBatcherDict, SQLQueryInBatcherList
logger = logging.getLogger(__name__)
class FeatureExplodeUnknownError(Exception):
def __init__(self, msg: str):
super().__init__(msg)
class SampleService:
def __init__(self, database_connection: DatabaseConnection, sql_select_limit: int):
self._connection = database_connection
self._sql_select_limit = sql_select_limit
def get_samples_with_variants(self, reference_name: str) -> List[Sample]:
"""
Gets a list of all samples that have variants associated with the given reference genome name.
:param reference_name: The reference genome name.
:return: A list of Samples with variants with respect to the reference genome name, or an empty list if there are no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()
return samples
def feature_explode_unknown(self, feature: QueryFeature) -> List[QueryFeature]:
if isinstance(feature, QueryFeatureHGVSGN):
features_spdi = self.find_features_spdi_for_hgvsgn(feature)
if len(features_spdi) == 0:
raise FeatureExplodeUnknownError(f'feature={feature} is of type HGVSGN but the corresponding SPDI '
f'feature does not exist in the database. Cannot convert to unknown '
f'SPDI representation.')
else:
unknown_features = []
for feature in features_spdi:
unknown_features.extend(feature.to_unknown_explode())
return unknown_features
elif isinstance(feature, QueryFeatureHGVS):
if feature.is_nucleotide():
variants_hgvs = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_c == feature.id) \
.all()
elif feature.is_protein():
variants_hgvs = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_p == feature.id) \
.all()
else:
raise Exception(f'feature=[{feature}] is neither nucleotide nor protein')
if len(variants_hgvs) == 0:
raise FeatureExplodeUnknownError(f'feature={feature} is of type HGVS but the corresponding SPDI '
f'feature does not exist in the database. Cannot convert to unknown '
f'SPDI representation.')
else:
unknown_features = []
for variants_sample_obj in variants_hgvs:
unknown_features.extend(QueryFeatureMutationSPDI(variants_sample_obj.spdi).to_unknown_explode())
return unknown_features
else:
return feature.to_unknown_explode()
def find_features_spdi_for_hgvsgn(self, feature: QueryFeatureHGVSGN) -> List[QueryFeatureMutationSPDI]:
if not isinstance(feature, QueryFeatureHGVSGN):
raise Exception(f'Cannot handle feature={feature}. Not of type {QueryFeatureHGVSGN.__name__}')
query = self._connection.get_session().query(NucleotideVariantsSamples).filter(
NucleotideVariantsSamples.sequence == feature.sequence)
if feature.has_gene():
query = query.filter(NucleotideVariantsSamples.annotation_gene_name == feature.gene)
if feature.is_nucleotide():
query = query.filter(NucleotideVariantsSamples.annotation_hgvs_c == feature.variant)
elif feature.is_protein():
query = query.filter(NucleotideVariantsSamples.annotation_hgvs_p == feature.variant)
else:
raise Exception(f'feature={feature} is neither protein nor nucleotide')
return [QueryFeatureMutationSPDI(s.spdi) for s in query.all()]
def get_samples_with_mlst_alleles(self, scheme_name: str) -> List[Sample]:
"""
Gets a list of all samples that have MLST alleles associated with the given scheme name.
:param scheme_name: The scheme name.
:return: A list of Samples with MLST alleles with respect to the scheme name, or an empty list if there are no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_mlst_alleles) \
.join(SampleMLSTAlleles.scheme) \
.filter(MLSTScheme.name == scheme_name) \
.all()
return samples
def get_samples_with_variants_on_sequence(self, sequence_name: str) -> List[Sample]:
"""
Gets a list of all samples that have variants associated with the given sequence name.
:param sequence_name: The sequence name.
:return: A list of Samples with variants with respect to the sequence name, or an empty list if there are no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.join(Reference.sequences) \
.filter(ReferenceSequence.sequence_name == sequence_name) \
.all()
return samples
def get_samples_associated_with_reference(self, reference_name: str) -> List[Sample]:
"""
Gets a list of all samples associated with a reference name.
:param reference_name: The reference name.
:return: A list of Samples associated with the reference name, or an empty list if there are no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()
return samples
def get_samples_set_associated_with_reference(self, reference_name: str) -> SampleSet:
"""
Gets the set of IDs of all samples associated with a reference name.
:param reference_name: The reference name.
:return: A SampleSet of the samples associated with the reference name (empty if there are none).
"""
sample_ids = [i for i, in self._connection.get_session().query(Sample.id) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()]
return SampleSet(sample_ids=sample_ids)
def create_dataframe_from_sample_set(self, present_set: SampleSet,
absent_set: SampleSet,
unknown_set: SampleSet,
queries_expression: str) -> pd.DataFrame:
sample_sets_status_list = [(present_set, 'Present'), (absent_set, 'Absent'), (unknown_set, 'Unknown')]
data = []
for sample_status in sample_sets_status_list:
sample_set = sample_status[0]
status = sample_status[1]
if not sample_set.is_empty():
samples = self.find_samples_by_ids(sample_set)
for sample in samples:
data.append([queries_expression, sample.name, sample.id, status])
return pd.DataFrame(data=data, columns=['Query', 'Sample Name', 'Sample ID', 'Status'])
def count_samples_associated_with_reference(self, reference_name: str) -> int:
return self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.count()
def count_samples_associated_with_mlst_scheme(self, scheme_name: str) -> int:
return len(self.get_samples_with_mlst_alleles(scheme_name))
def get_samples(self) -> List[Sample]:
return self._connection.get_session().query(Sample).all()
def count_samples(self) -> int:
return self._connection.get_session().query(Sample).count()
def get_all_sample_ids(self) -> SampleSet:
ids_list = [id for id, in self._connection.get_session().query(Sample.id).all()]
return SampleSet(ids_list)
def get_existing_samples_by_names(self, sample_names: List[str]) -> List[Sample]:
return self._connection.get_session().query(Sample) \
.filter(Sample.name.in_(sample_names)) \
.all()
def which_exists(self, sample_names: List[str]) -> List[str]:
"""
Returns which of the given samples exist in the database.
:param sample_names: The list of sample names.
:return: A list of those passed sample names that exist in the database.
"""
samples = self._connection.get_session().query(Sample) \
.filter(Sample.name.in_(sample_names)) \
.all()
return [sample.name for sample in samples]
def get_sample(self, sample_name: str) -> Sample:
return self._connection.get_session().query(Sample) \
.filter(Sample.name == sample_name) \
.one()
def exists(self, sample_name: str):
return self._connection.get_session().query(Sample) \
.filter(Sample.name == sample_name).count() > 0
def find_samples_by_ids(self, sample_ids: Union[List[int], SampleSet]) -> List[Sample]:
if isinstance(sample_ids, SampleSet):
sample_ids = list(sample_ids)
query_batcher = SQLQueryInBatcherList(in_data=sample_ids, batch_size=self._sql_select_limit)
def handle_batch(sample_ids_batch: List[int]) -> List[Sample]:
return self._connection.get_session().query(Sample) \
.filter(Sample.id.in_(sample_ids_batch)) \
.all()
return query_batcher.process(handle_batch)
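# Sketch of the assumed batching behaviour (grounded only in the call above,
# not in SQLQueryInBatcherList's actual implementation): the id list is split
# so that each IN (...) clause stays under the configured SQL variable limit,
# roughly:
#   results = []
#   for i in range(0, len(sample_ids), self._sql_select_limit):
#       results.extend(handle_batch(sample_ids[i:i + self._sql_select_limit]))
#   return results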
def get_variants_samples_by_variation_features(self, features: List[QueryFeatureMutation]) -> Dict[
str, NucleotideVariantsSamples]:
standardized_features_to_input_feature = {}
standardized_features_ids = set()
standardized_feature_hgvs_c_ids = set()
standardized_feature_hgvs_p_ids = set()
for feature in features:
if isinstance(feature, QueryFeatureMutationSPDI):
dbf = NucleotideMutationTranslater.to_db_feature(feature)
if dbf.id in standardized_features_to_input_feature:
standardized_features_to_input_feature[dbf.id].append(feature.id)
else:
standardized_features_to_input_feature[dbf.id] = [feature.id]
standardized_features_ids.add(dbf.id)
elif isinstance(feature, QueryFeatureHGVSGN):
logger.warning(f'feature=[{feature}] is a QueryFeatureHGVSGN and I do not handle it here.')
elif isinstance(feature, QueryFeatureHGVS):
if feature.is_nucleotide():
standardized_feature_hgvs_c_ids.add(feature.id)
elif feature.is_protein():
standardized_feature_hgvs_p_ids.add(feature.id)
else:
raise Exception(f'feature=[{feature}] is neither nucleotide nor protein')
else:
raise Exception(f'Invalid type for feature=[{feature}]. '
f'Must be either {QueryFeatureMutationSPDI.__class__.__name__} or '
f'{QueryFeatureHGVS.__class__.__name__}')
if len(standardized_features_ids) > 0:
variants_spdi = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._spdi.in_(standardized_features_ids)) \
.all()
else:
variants_spdi = []
if len(standardized_feature_hgvs_c_ids) > 0:
variants_hgvs_c = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_c.in_(standardized_feature_hgvs_c_ids)) \
.all()
else:
variants_hgvs_c = []
if len(standardized_feature_hgvs_p_ids) > 0:
variants_hgvs_p = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_p.in_(standardized_feature_hgvs_p_ids)) \
.all()
else:
variants_hgvs_p = []
# Map back unstandardized IDs to the actual variant object
# Use this because some features can have multiple identifiers for the same feature
# (e.g., ref:10:A:T and ref:10:1:T). I want to make sure I map each passed id to the
# same object (that is, in this example, I want to return a dictionary with two keys, one for each ID)
unstandardized_variants = {}
for v in variants_spdi:
for vid in standardized_features_to_input_feature[v.spdi]:
unstandardized_variants[vid] = v
unstandardized_variants.update({v.id_hgvs_c: v for v in variants_hgvs_c})
unstandardized_variants.update({v.id_hgvs_p: v for v in variants_hgvs_p})
return unstandardized_variants
def _get_mlst_samples_by_mlst_features(self, features: List[QueryFeatureMLST]) -> List[MLSTAllelesSamples]:
feature_ids = list({f.id_no_prefix for f in features})
mlst_alleles = self._connection.get_session().query(MLSTAllelesSamples) \
.filter(MLSTAllelesSamples._sla.in_(feature_ids)) \
.all()
return mlst_alleles
def _get_feature_type(self, features: List[QueryFeature]) -> Type[QueryFeature]:
feature_types = {f.__class__ for f in features}
if len(feature_types) != 1:
raise Exception(f'Should only be one feature type but instead got: {feature_types}.')
else:
return feature_types.pop()
def find_unknown_sample_sets_by_features(self, features: List[QueryFeature]) -> Dict[str, SampleSet]:
unknown_to_features_dict = {}
unknown_features = []
for feature in features:
try:
unknown_features_exploded = self.feature_explode_unknown(feature)
unknown_features.extend(unknown_features_exploded)
for unknown_feature in unknown_features_exploded:
unknown_to_features_dict[unknown_feature.id] = feature
except FeatureExplodeUnknownError as e:
logger.warning(
f'Could not map feature={feature} to a set of unknown features. Will assume no unknowns exist.')
if len(unknown_features) > 0:
unknown_features_sets = self.find_sample_sets_by_features(unknown_features)
else:
unknown_features_sets = set()
features_to_unknown_sample_sets = {}
for uid in unknown_features_sets:
fid = unknown_to_features_dict[uid].id
sample_set = unknown_features_sets[uid]
# If we've already set this sample set with the same feature, we need to
# merge the unknown sample sets together. This can occur if, e.g., we have a
# large deletion and are iterating over each base in the deletion in turn
# (e.g., ref:10:ATT:A gets converted to ['ref:10:A:?', 'ref:11:T:?',
# 'ref:12:T:?']); we then merge the unknown sample results for each of these
# features in turn.
if fid in features_to_unknown_sample_sets:
previous_sample_set = features_to_unknown_sample_sets[fid]
features_to_unknown_sample_sets[fid] = previous_sample_set.union(sample_set)
else:
features_to_unknown_sample_sets[fid] = sample_set
return features_to_unknown_sample_sets
def find_sample_sets_by_features(self, features: List[QueryFeature]) -> Dict[str, SampleSet]:
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureHGVSGN):
# In this case where I'm querying by gene name, first convert to SPDI features before lookup
# TODO: it's not the most efficient to do this as a loop, but it's easier to implement right now
hgvs_gn_id_to_sampleset = dict()
for feature in features:
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as vtransforms
from typing import Type, Any, Callable, Union, List, Optional
import blocks
from torch.nn import init
import functools
####################
# Utility Functions
####################
def Identity(x):
return x
####################
# Pix2Pix by Isola
####################
class UnetGenerator(nn.Module):
"""
Create a Unet-based generator.
Source: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/f13aab8148bd5f15b9eb47b690496df8dadbab0c/models/networks.py#L436
"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer [nn.BatchNorm2d, nn.InstanceNorm2d]
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
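# Hedged usage sketch (not part of the original file): num_downs has to match
# the input resolution so the bottleneck reaches 1x1, e.g. 2**8 = 256:
#   netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=8)
#   fake = netG(torch.randn(1, 3, 256, 256))   # -> torch.Size([1, 3, 256, 256])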
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
#####################
# Rewritten for better interpretability
#####################
class Pix2Pix_Encoder_Block(nn.Module):
"""
<NAME>., <NAME>., <NAME>., & <NAME>. (2017). Image-to-Image Translation with Conditional Adversarial Networks. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 5967–5976. https://doi.org/10.1109/CVPR.2017.632
"""
def __init__(self, _in_channels, _out_channels, _kernel_size=(4,4), _stride=(2,2), _padding=(1,1), _dilation=(1,1), _normType="BatchNorm", use_bias=True):
super().__init__()
self.in_channels = _in_channels
self.out_channels = _out_channels
self.kernel_size = _kernel_size
self.stride = _stride
self.padding = _padding
self.dilation_rate = _dilation
self.normType = _normType
# Downsampling
self.conv2d_1 = nn.Conv2d(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation_rate,
bias=use_bias)
# Norms
if self.normType is not None:
if self.normType == 'BatchNorm':
self.norm = nn.BatchNorm2d(num_features=self.out_channels, affine=True)
if self.normType == 'InstanceNorm':
self.norm = nn.InstanceNorm2d(num_features=self.out_channels, affine=True)
# ReLU
self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=False)
def forward(self, x: Tensor) -> Tensor:
out = self.conv2d_1(x)
if self.normType is not None:
out = self.norm(out)
out = self.relu(out)
return out
class Pix2Pix_DecoderBlock(nn.Module):
"""
<NAME>., <NAME>., <NAME>., & <NAME>. (2017). Image-to-Image Translation with Conditional Adversarial Networks. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 5967–5976. https://doi.org/10.1109/CVPR.2017.632
"""
def __init__(self, _in_channels, _out_channels, _kernel_size=(4,4), _stride=(2,2), _padding=(1,1), _dilation=(1,1), _normType="BatchNorm", use_bias=True, _dropoutType = "normal", _dropRate=0.5):
super().__init__()
self.in_channels = _in_channels
self.out_channels = _out_channels
self.kernel_size = _kernel_size
self.stride = _stride
self.padding = _padding
self.dilation_rate = _dilation
self.normType = _normType
self.dropoutType = _dropoutType
self.dropRate = _dropRate
self.upsampleConv = nn.ConvTranspose2d(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation_rate,
bias=use_bias)
# Norms
if self.normType is not None:
if self.normType == 'BatchNorm':
self.norm = nn.BatchNorm2d(num_features=self.out_channels, affine=True)
if self.normType == 'InstanceNorm':
self.norm = nn.InstanceNorm2d(num_features=self.out_channels, affine=True)
# ReLU
self.relu = nn.ReLU()
# Dropout
if self.dropoutType is not None:
if self.dropoutType == "normal":
self.dropout = nn.Dropout(p=self.dropRate, inplace=False)
if self.dropoutType == "ADL":
self.dropout = blocks.ADL(drop_rate=self.dropRate, gamma=0.9)
def forward(self, x: Tensor, skip_tensor: Tensor) -> Tensor:
out = self.upsampleConv(x)
#print("after upsample: " + str(out.shape))
if self.normType is not None:
out = self.norm(out)
if self.dropoutType is not None:
out = self.dropout(out)
out = torch.cat((out, skip_tensor), 1)
#print("after cat: " + str(out.shape))
out = self.relu(out)
return out
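# Shape sketch for the two blocks above (assuming the default 4x4 kernel,
# stride 2, padding 1): the encoder block halves the spatial dimensions,
#   (N, C_in, H, W) -> (N, C_out, H/2, W/2)
# while the decoder block doubles them and then concatenates the skip tensor
# along the channel axis, so its output carries C_out + C_skip channels.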
class Custom_Written_Generator(nn.Module):
def __init__(self, input_array_shape, _first_out_channels = 64, _normType="BatchNorm", _dropoutType = "normal", _dropRate=0.5, _outputType="Tanh"):
super().__init__()
self.first_out_channels = _first_out_channels
self.input_array_shape = input_array_shape
self.outputType = _outputType
self.normType = _normType
if self.normType == "BatchNorm":
_use_bias = False
if self.normType == "InstanceNorm":
_use_bias = True
# INPUT
self.convInput = nn.Conv2d(in_channels=self.input_array_shape[1], out_channels=self.first_out_channels,
kernel_size=4, stride=2, padding=1, dilation=1,
bias=_use_bias)
self.lrelu = nn.LeakyReLU(0.2)
# ENCODER
self.enc1 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels, _out_channels=self.first_out_channels*2, _normType=self.normType, use_bias=_use_bias)
self.enc2 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels*2, _out_channels=self.first_out_channels*4, _normType=self.normType, use_bias=_use_bias)
self.enc3 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels*4, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.enc4 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels*8, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.enc5 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels*8, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.enc6 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels*8, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.enc7 = Pix2Pix_Encoder_Block( _in_channels=self.first_out_channels*8, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
input_spatial = (int(self.input_array_shape[2]*(0.5**7)), int(self.input_array_shape[3]*(0.5**7)) )
# Bridge
#same_padding = (input_spatial[0]//2 - 1 + 4//2 , input_spatial[1]//2 - 1 + 4//2)
self.bridgeConv = nn.Conv2d(in_channels=self.first_out_channels*8,
out_channels=self.first_out_channels*8,
kernel_size=4,
stride=2,
padding=1, #same_padding,
dilation=1,
bias=_use_bias)
self.bridgeRelu = nn.ReLU()
# Decoder.
self.dec7 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*8, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.dec6 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*16, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.dec5 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*16, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias)
self.dec4 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*16, _out_channels=self.first_out_channels*8, _normType=self.normType, use_bias=_use_bias, _dropoutType=None)
self.dec3 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*16, _out_channels=self.first_out_channels*4, _normType=self.normType, use_bias=_use_bias, _dropoutType=None)
self.dec2 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*8, _out_channels=self.first_out_channels*2, _normType=self.normType, use_bias=_use_bias, _dropoutType=None)
self.dec1 = Pix2Pix_DecoderBlock( _in_channels=self.first_out_channels*4, _out_channels=self.first_out_channels, _normType=self.normType, use_bias=_use_bias, _dropoutType=None)
# Output
input_spatial = input_array_shape[2:4]
#same_padding = (input_spatial[0]//2 - 1 + 4//2 , input_spatial[1]//2 - 1 + 4//2 )
self.output_conv = nn.ConvTranspose2d(
in_channels=self.first_out_channels*2,
out_channels=self.input_array_shape[1],
kernel_size=(4,4),
stride=(2,2),
padding=(1,1),
dilation=(1,1),
bias=True)
if self.outputType == "Tanh":
self.outImage = nn.Tanh()
if self.outputType == "Sigmoid":
self.outImage = nn.Sigmoid()
def forward(self, x: Tensor) -> Tensor:
# Encode
out = self.convInput(x)
skip1 = self.lrelu(out)
skip2 = self.enc1(skip1)
skip3 = self.enc2(skip2)
skip4 = self.enc3(skip3)
skip5 = self.enc4(skip4)
skip6 = self.enc5(skip5)
skip7 = self.enc6(skip6)
# Bridge
out = self.bridgeConv(skip7)
out = self.bridgeRelu(out)
# Decode
out = self.dec7(out, skip7)
out = self.dec6(out, skip6)
out = self.dec5(out, skip5)
out = self.dec4(out, skip4)
out = self.dec3(out, skip3)
out = self.dec2(out, skip2)
out = self.dec1(out, skip1)
# Output
out = self.output_conv(out)
out = self.outImage(out)
return out
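# --------------------------------------------------------------------------
# Illustrative usage sketch (added example, not part of the original model code).
# The forward pass halves the resolution 8 times (input conv, six encoder blocks
# and the bridge; enc7 is constructed but not called in forward), so the spatial
# dimensions in input_array_shape should be divisible by 2**8 = 256 for the skip
# connections to line up. The 256x256 RGB shape below is an assumed example.
def _demo_custom_written_generator():
    import torch
    input_shape = (1, 3, 256, 256)   # (N, C, H, W)
    gen = Custom_Written_Generator(input_array_shape=input_shape)
    fake = gen(torch.randn(*input_shape))
    return fake.shape                # expected: torch.Size([1, 3, 256, 256])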
""""""
class Discriminator_Pix2Pix(nn.Module):
"""
The 70x70 PatchGAN from Isola et al.
Implementation guided by code from:
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/f13aab8148bd5f15b9eb47b690496df8dadbab0c/models/networks.py#L538
LOGITS output -- use BCEWithLogitsLoss
Paper:
Isola, P., Zhu, J.-Y., Zhou, T., & Efros, A. A. (2017). Image-to-Image Translation with Conditional Adversarial Networks. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 5967–5976. https://doi.org/10.1109/CVPR.2017.632
"""
def __init__(self, _input_array_size, _first_out_channels=64, _normType="BatchNorm", spectral_normalize=False):
super().__init__()
self.input_array_size = _input_array_size
self.first_out_channels = _first_out_channels
self.normType = _normType
if self.normType == "BatchNorm":
normlayer = nn.BatchNorm2d
use_bias = False
if self.normType == "InstanceNorm":
normlayer = nn.InstanceNorm2d
use_bias = True
if spectral_normalize:
self.normalization_function = nn.utils.spectral_norm
else:
self.normalization_function = Identity
self.conv1 = self.normalization_function(nn.Conv2d(in_channels=self.input_array_size[1],
out_channels=self.first_out_channels,
kernel_size=(4,4),
stride=(2,2),
padding=(1,1), #same_padding,
dilation=(1,1),
bias=use_bias))
_out_channels2 = self.first_out_channels*2
self.conv2 = self.normalization_function(nn.Conv2d(in_channels=self.first_out_channels,
out_channels=_out_channels2,
kernel_size=(4,4),
stride=(2,2),
padding=(1,1), #same_padding,
dilation=(1,1),
bias=use_bias))
self.BN2 = normlayer(num_features=_out_channels2, affine=True)
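# --------------------------------------------------------------------------
# Illustrative loss sketch (added example, not part of the original code).
# As the docstring above notes, the discriminator outputs raw logits, so the
# adversarial loss is typically nn.BCEWithLogitsLoss applied patch-wise to the
# (N, 1, H', W') score map. The helper below is a hypothetical illustration.
def _demo_patchgan_logits_loss(real_logits, fake_logits):
    import torch
    criterion = torch.nn.BCEWithLogitsLoss()
    loss_real = criterion(real_logits, torch.ones_like(real_logits))    # real pairs -> label 1
    loss_fake = criterion(fake_logits, torch.zeros_like(fake_logits))   # generated pairs -> label 0
    return 0.5 * (loss_real + loss_fake)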
import argparse
import configparser
import ntpath
import shutil
import time
import sys, os, re
from context import diana
import diana.classes.drug as diana_drug
import diana.classes.functional_analysis as functional_analysis
import diana.classes.network_analysis as network_analysis
import diana.classes.top_scoring as top_scoring
def main():
"""
Generate profiles for drugs using GUILD.
Optimized for Python 3.
python /home/quim/PHD/Projects/DIANA/diana/scripts/generate_profiles.py -j entresto -d entresto -sif /home/quim/PHD/Projects/DIANA/diana/data/network_cheng.sif
python /home/quim/PHD/Projects/DIANA/diana/scripts/generate_profiles.py -d DB11699 -sif /home/quim/PHD/Projects/DIANA/diana/data/network_cheng.sif
"""
options = parse_user_arguments()
generate_profiles(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Generate the profiles of the input drug",
epilog = "@oliva's lab 2020")
parser.add_argument('-j','--job_id',dest='job_id',action = 'store',
help = 'Identifier of the job. It will be used to create a directory with the name of the identifier to store the results')
parser.add_argument('-d','--drug_name',dest='drug_name',action = 'store',
help = """ Name of the drug. If you do not provide targets for this drug, or the number of targets provided is too small,
the program will use this name to search for targets in the BIANA database. If targets are provided, this field is only used
for naming purposes and is completely optional.
If the name of the drug has more than one word or contains special characters (parentheses, single quotes), enclose the name in
double quotes. """)
parser.add_argument('-t','--targets',dest='targets',action = 'store',
help = 'Input file with the targets of the drug, one target per line.')
parser.add_argument('-pt','--proteins_type_id',dest='proteins_type_id',action = 'store', default='geneid',
help = 'Type of identifier used for the input targets and for the proteins of the network. Both must use the same type of ID (default: geneid).')
parser.add_argument('-sif','--sif_file',dest='sif',action = 'store',
help = 'Input file with a protein-protein interaction network in SIF format.')
parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
help = """List of percentages that will be used as cut-offs to define the profiles of the drug. It must be a file containing:
- The threshold values, one number per line.
For example, a file called "top_threshold.list" containing:
0.1
0.5
1
5
10
functions
""")
parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
options=parser.parse_args()
return options
#################
#################
# MAIN FUNCTION #
#################
#################
def generate_profiles(options):
"""
Generates the profiles of the input drug
"""
# Start marker for time measure
start = time.time()
print("\n\t\t-----------------------------------------------------------------------------------------------------------------------\n")
print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. First part: Generation of drug profiles\n")
print("\t\t-----------------------------------------------------------------------------------------------------------------------\n")
# Get the script path and define directories used
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
scripts_dir = os.path.join(main_path, 'scripts')
mappings_dir = os.path.join(main_path, 'mappings')
data_dir = os.path.join(main_path, 'data')
workspace_dir = options.workspace
create_directory(workspace_dir)
# Create a directory for the data
profiles_dir = os.path.join(workspace_dir, 'profiles')
create_directory(profiles_dir)
# Create directories for additional data
other_data_dir = os.path.join(workspace_dir, 'additional_data')
create_directory(other_data_dir)
random_networks_dir = os.path.join(other_data_dir, 'random_networks')
create_directory(random_networks_dir)
associations_dir = os.path.join(other_data_dir, 'gene_function_associations')
create_directory(associations_dir)
# Create a drug instance
if options.drug_name:
drug_instance = diana_drug.Drug(options.drug_name)
else:
print(' DIANA INFO:\tThe drug_name parameter is missing. Please provide the parameter -d with the name of the drug.\n')
sys.exit(10)
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = configparser.ConfigParser()
config.read(config_file)
#------------------------#
# TARGETS CONTROLLER #
#------------------------#
# TARGETS CONTROLLER: Checks the targets provided by the user. If necessary, performs a search
# in BIANA database to obtain more targets
# Check if the targets file is provided
if options.targets and fileExist(options.targets):
drug_instance.obtain_targets_from_file(options.targets, options.proteins_type_id)
else:
# Obtain the targets from a table
drug_to_targets_file = os.path.join(mappings_dir, 'drugbank_geneid_drug_to_targets.txt')
drug_mapping_file = os.path.join(mappings_dir, 'drugbank_drug_mappings.txt')
if fileExist(drug_to_targets_file) and fileExist(drug_mapping_file):
# First, translate the drug input name to drugbankid (if necessary)
drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
# Then, get the targets (in geneid) from the table
drug_instance.obtain_targets_from_table(drugbankids, drug_to_targets_file, target_type_id='geneid')
else:
if not fileExist(drug_to_targets_file):
print(" DIANA INFO:\tMissing drug to targets file: {}.\n".format(drug_to_targets_file))
if not fileExist(drug_mapping_file):
print(" DIANA INFO:\tMissing drug mappings file: {}.\n".format(drug_mapping_file))
sys.exit(10)
print( " DIANA INFO:\tThe targets provided for the drug {} are:\n\t\t{}.\n".format( options.drug_name, ', '.join([ str(x) for x in drug_instance.targets]) ) )
#--------------------#
# SIF CONTROLLER #
#--------------------#
# SIF CONTROLLER: Checks the network in SIF format provided by the user.
# Check if the network file is provided
if options.sif and fileExist(options.sif):
# If the network file is provided, we create a Network instance
network_instance = network_analysis.Network(network_file=options.sif, type_id=options.proteins_type_id, network_format='sif')
# We search for the targets in the network
drug_instance.targets_in_network = network_instance.get_targets_in_network(drug_instance.targets)
# We create a directory in the random networks directory for this network
network_filename = ntpath.basename(options.sif)
random_networks_dir = os.path.join(random_networks_dir, network_filename)
create_directory(random_networks_dir)
# We create a directory in gene function associations directory for this network
network_associations_dir = os.path.join(associations_dir, network_filename)
create_directory(network_associations_dir)
# We create a directory in gene function associations directory for targets
target_associations_dir = os.path.join(associations_dir, 'targets')
create_directory(target_associations_dir)
else:
# If not, we output an error
print(' DIANA INFO:\tThe network SIF file is missing. Please provide the parameter -sif.\n\t\tIf you do not have a network, use one of the networks in the sif folder.\n')
sys.exit(10)
# Check if the number of targets provided is sufficient for the analysis
if len(drug_instance.targets_in_network) < 1:
raise diana_drug.InsufficientTargets(drug_instance.targets_in_network, 1)
else:
print( " DIANA INFO:\tThe targets found in the network are:\n\t\t{}.\n".format( ', '.join([ str(x) for x in drug_instance.targets_in_network]) ) )
#------------------------------------------#
# CREATE DIRECTORIES AND GENERAL FILES #
#------------------------------------------#
# Create a directory for the drug
if options.job_id and options.job_id != '':
drug_id = options.job_id
else:
drug_id = diana_drug.generate_diana_id(drug_instance.drug_name, drug_instance.targets, network_filename)
drug_dir = os.path.join(profiles_dir, drug_id)
create_directory(drug_dir)
print(' DIANA INFO:\tThe ID given to the drug, which will be used to create a directory and store the results, is: {}\n'.format(drug_id))
# Create a directory for the Target results
targets_dir = os.path.join(drug_dir, 'target_profiles')
create_directory(targets_dir)
# Create a directory for the GUILD results
guild_dir = os.path.join(drug_dir, 'guild_profiles')
create_directory(guild_dir)
# Create a directory for the Structure results
structure_dir = os.path.join(drug_dir, 'structure_profiles')
create_directory(structure_dir)
# Create a directory for the ATCs results
atc_dir = os.path.join(drug_dir, 'atc_profiles')
create_directory(atc_dir)
# Create a directory for the dcse results
se_dir = os.path.join(drug_dir, 'se_profiles')
create_directory(se_dir)
# Create a targets file
targets_file = os.path.join(targets_dir, '{}_targets.txt'.format(drug_instance.drug_name))
diana_drug.create_targets_file(drug_instance.targets, targets_file)
#------------------------------#
# CREATE ASSOCIATION FILES #
#------------------------------#
# Define parameters for the functional enrichment
type_functions = ['gobp', 'gomf', 'reactome']
type_corrections = ['fdr_bh', 'bonferroni']
# Check if the gene-function association files are created
functions_data_dir = os.path.join(data_dir, 'functions_data')
for type_function in type_functions:
# Association files for the network
associations_file = os.path.join(network_associations_dir, '{}_to_gene.txt'.format(type_function))
if not fileExist(associations_file):
# Create associations file for GUILD (using the geneids of the network)
functional_analysis.create_association_file(all_geneids=network_instance.get_nodes(), type_function=type_function, taxID=9606, output_file=associations_file, functions_data_dir=functions_data_dir)
# Association files for all targets
associations_file = os.path.join(target_associations_dir, '{}_to_gene.txt'.format(type_function))
if not fileExist(associations_file):
# Get all geneids associated to targets
drugbank_geneid_mapping_file = os.path.join(mappings_dir, 'drugbank_geneid_drug_target_interactions.txt')
targets = diana_drug.get_all_targets_from_mappings(drugbank_geneid_mapping_file)
# Create associations file for targets (using all geneids)
functional_analysis.create_association_file(all_geneids=targets, type_function=type_function, taxID=9606, output_file=associations_file, functions_data_dir=functions_data_dir)
#--------------------------------#
# SCORING OF NETWORKS (GUILD) #
#--------------------------------#
# Run GUILD
print(" DIANA INFO:\tRunning GUILD (network scoring program).\n")
# Create a directory for GUILD results
guild_output_dir = os.path.join(drug_dir, 'guild_output')
create_directory(guild_output_dir)
# Create targets file for the targets in the network (it will be used by GUILD)
network_targets_file = os.path.join(guild_output_dir, '{}_targets_in_network.txt'.format(drug_instance.drug_name))
diana_drug.create_targets_file(drug_instance.targets_in_network, network_targets_file)
# Run GUILD
scores_file = os.path.join(guild_output_dir, 'output_scores.sif.netcombo')
if not fileExist(scores_file):
guild_command = 'python {} {} {} {} {} {} {}'.format( os.path.join(scripts_dir, 'run_guild.py'), drug_dir, network_targets_file, options.sif, guild_output_dir, random_networks_dir, config.get('Paths', 'guild_path') )
os.system(guild_command)
print(' DIANA INFO:\tGUILD has finished.\n')
# Remove files not needed
files_to_remove = ['edge_scores_netshort.sif', 'edge_scores.sif', 'node_scores_background.sif', 'node_scores.sif', 'output_scores.sif.netscore.log', 'output_scores.sif.netshort.log', 'output_scores.sif.netzcore.log', 'seed_scores_background.sif', 'seed_scores.sif']
for file_to_remove in files_to_remove:
if fileExist(os.path.join(guild_output_dir, file_to_remove)):
command = 'rm {}'.format(os.path.join(guild_output_dir, file_to_remove))
os.system(command)
else:
print(' DIANA INFO:\tThe scoring of the network with GUILD for {} was already done, so it has been skipped.\n'.format(options.drug_name))
# Creating an instance of the file generated by GUILD
guild_profile_instance = network_analysis.GUILDProfile(scores_file, type_id=network_instance.type_id, top=100, top_type='percentage')
#-----------------------------#
# GENERATE GUILD PROFILES #
#-----------------------------#
print(' DIANA INFO:\tSTARTING GENERATION OF GUILD PROFILES\n')
# Copy the scores file at the guild directory
new_scores_file = os.path.join(guild_dir, 'output_scores.sif.netcombo')
shutil.copyfile(scores_file, new_scores_file)
# Score the network
scored_network_file = os.path.join(guild_dir, 'network_scored.txt')
if not fileExist(scored_network_file):
scored_network_instance = network_instance.score_network(guild_profile_instance.node_to_score, scored_network_file)
else:
scored_network_instance = network_analysis.EdgeProfile(network_file=scored_network_file, type_id=network_instance.type_id, network_format=network_instance.network_format, top=100)
# Get the list of thresholds to create the profiles
if options.threshold_list and fileExist(options.threshold_list):