input stringlengths 2.65k 237k | output stringclasses 1 value |
|---|---|
= int(8e6)
"""
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
6 4.05e+03 34.11 |
6 4.05e+03 34.11 | 34.11 0.4 38 0 | 0.05 0.80 0.84 0.33
6 1.17e+05 62.90 |
6 1.17e+05 62.90 | 62.90 0.7 38 0 | 0.11 0.33 14.94 0.28
6 1.96e+05 313.13 |
6 1.96e+05 313.13 | 313.13 0.8 140 0 | 0.13 0.59 30.46 0.20
6 2.56e+05 313.13 | 288.39 0.0 125 0 | 0.14 0.59 36.48 0.15
6 3.04e+05 313.13 | 268.59 0.0 117 0 | 0.14 0.49 32.35 0.14
6 3.44e+05 313.13 | 7.47 0.0 33 0 | 0.12 0.36 25.97 0.13
6 3.83e+05 1038.82 |
6 3.83e+05 1038.82 | 1038.82 0.2 1000 0 | 0.07 0.32 29.84 0.17
6 4.22e+05 1038.82 | 1028.53 0.0 1000 0 | 0.08 0.32 31.94 0.18
6 4.57e+05 1038.82 | 1024.58 0.0 1000 0 | 0.09 0.31 29.22 0.14
6 4.89e+05 1054.37 |
6 4.89e+05 1054.37 | 1054.37 6.0 1000 0 | 0.07 0.30 32.58 0.15
6 5.18e+05 1054.37 | 345.63 0.0 152 0 | 0.14 0.32 33.13 0.14
6 5.44e+05 1054.37 | 181.06 0.0 110 0 | 0.10 0.28 34.14 0.14
6 5.71e+05 1054.37 | 602.82 0.0 215 0 | 0.15 0.39 31.36 0.12
6 5.95e+05 1054.37 | 1035.55 0.0 1000 0 | 0.10 0.40 33.95 0.15
6 6.19e+05 1054.37 | 295.51 0.0 126 0 | 0.15 0.51 31.59 0.14
6 6.44e+05 1054.37 | 64.50 0.0 43 0 | 0.07 0.46 33.29 0.17
6 6.66e+05 1054.37 | 643.23 0.0 284 0 | 0.14 0.37 34.44 0.15
6 6.89e+05 1054.37 | 501.73 0.0 173 0 | 0.11 0.32 35.48 0.16
6 7.08e+05 1054.37 | 388.54 0.0 214 0 | 0.13 0.27 34.18 0.13
6 7.29e+05 1054.37 | 458.66 0.0 304 0 | 0.08 0.26 31.83 0.14
6 7.48e+05 1054.37 | 254.32 0.0 146 0 | 0.12 0.22 29.52 0.11
6 7.68e+05 1054.37 | 1028.87 0.0 1000 0 | 0.08 0.19 26.58 0.10
6 7.86e+05 1054.37 | 240.32 0.0 196 0 | 0.08 0.16 25.03 0.10
6 8.03e+05 1054.37 | 108.17 0.0 73 0 | 0.08 0.15 23.71 0.10
6 8.22e+05 1054.37 | 1029.23 0.0 1000 0 | 0.08 0.15 22.94 0.08
6 8.43e+05 1054.37 | 1039.32 0.0 1000 0 | 0.07 0.13 22.86 0.08
6 8.64e+05 1054.37 | 1036.23 0.0 1000 0 | 0.07 0.12 21.10 0.07
6 8.83e+05 2471.51 |
6 8.83e+05 2471.51 | 2471.51 2.0 1000 0 | 0.14 0.11 20.02 0.07
6 8.99e+05 2471.51 | 271.53 0.0 136 0 | 0.14 0.10 19.55 0.06
6 9.16e+05 2471.51 | 788.17 0.0 328 0 | 0.15 0.11 19.51 0.07
6 9.32e+05 2471.51 | 305.98 0.0 143 0 | 0.14 0.10 19.81 0.07
6 9.49e+05 2471.51 | 213.25 0.0 115 0 | 0.16 0.11 19.30 0.06
6 9.65e+05 2471.51 | 212.62 0.0 116 0 | 0.17 0.09 18.55 0.06
6 9.80e+05 2471.51 | 231.38 0.0 121 0 | 0.16 0.09 18.55 0.06
6 9.96e+05 2816.96 |
6 9.96e+05 2816.96 | 2816.96 1.5 1000 0 | 0.18 0.09 18.83 0.06
6 1.01e+06 2816.96 | 2526.01 0.0 1000 0 | 0.19 0.08 19.41 0.06
6 1.03e+06 2816.96 | 2502.35 0.0 1000 0 | 0.19 0.09 19.39 0.06
6 1.04e+06 2816.96 | 2792.51 0.0 1000 0 | 0.19 0.09 19.62 0.06
6 1.06e+06 2816.96 | 2807.12 18.1 1000 0 | 0.18 0.09 19.92 0.06
6 1.07e+06 2816.96 | 1274.49 0.0 452 0 | 0.20 0.08 19.97 0.06
6 1.08e+06 2827.08 |
6 1.08e+06 2827.08 | 2827.08 8.4 1000 0 | 0.19 0.08 20.43 0.06
6 1.10e+06 2827.08 | 1578.28 0.0 569 0 | 0.19 0.08 20.81 0.06
6 1.11e+06 2871.91 |
6 1.11e+06 2871.91 | 2871.91 0.9 1000 0 | 0.20 0.08 20.87 0.06
6 1.13e+06 2871.91 | 2688.35 0.0 1000 0 | 0.19 0.08 21.17 0.06
6 1.14e+06 2871.91 | 2681.34 0.0 1000 0 | 0.18 0.08 21.20 0.06
6 1.16e+06 2871.91 | 2810.03 0.0 1000 0 | 0.18 0.08 22.26 0.06
6 1.18e+06 2871.91 | 1660.03 0.0 584 0 | 0.18 0.08 21.62 0.06
6 1.19e+06 2871.91 | 2639.25 0.0 1000 0 | 0.18 0.07 21.81 0.06
6 1.21e+06 2871.91 | 939.60 0.0 312 0 | 0.18 0.07 21.66 0.06
6 1.22e+06 2871.91 | 2716.22 0.0 1000 0 | 0.18 0.08 21.85 0.06
6 1.24e+06 2871.91 | 2697.94 0.0 1000 0 | 0.19 0.07 21.58 0.06
6 1.25e+06 2871.91 | 2739.97 0.0 1000 0 | 0.19 0.07 22.32 0.06
6 1.26e+06 2871.91 | 2726.54 0.0 1000 0 | 0.19 0.07 22.24 0.06
6 1.27e+06 2871.91 | 2596.29 0.0 1000 0 | 0.20 0.07 22.64 0.06
6 1.29e+06 2871.91 | 2695.79 0.0 1000 0 | 0.19 0.07 22.49 0.06
6 1.30e+06 2871.91 | 2513.97 0.0 1000 0 | 0.18 0.07 22.17 0.06
6 1.31e+06 2871.91 | 2653.65 0.0 1000 0 | 0.20 0.06 21.88 0.06
6 1.32e+06 2871.91 | 2545.89 0.0 1000 0 | 0.19 0.06 22.11 0.06
6 1.34e+06 2871.91 | 2627.76 0.0 1000 0 | 0.17 0.06 22.41 0.06
6 1.35e+06 2871.91 | 2512.66 0.0 1000 0 | 0.18 0.06 21.69 0.06
6 1.36e+06 2871.91 | 2614.05 0.0 1000 0 | 0.19 0.06 22.41 0.06
6 1.37e+06 2871.91 | 2511.88 0.0 1000 0 | 0.18 0.06 22.86 0.06
6 1.38e+06 2871.91 | 2511.73 0.0 1000 0 | 0.18 0.06 22.40 0.06
6 1.39e+06 2871.91 | 2576.39 0.0 1000 0 | 0.18 0.06 22.36 0.06
6 1.40e+06 2871.91 | 2593.05 0.0 1000 0 | 0.18 0.06 22.56 0.06
6 1.42e+06 2871.91 | 2612.13 0.0 1000 0 | 0.19 0.06 22.38 0.06
6 1.43e+06 2871.91 | 2709.74 0.0 1000 0 | 0.18 0.06 22.08 0.06
6 1.44e+06 2871.91 | 2762.59 0.0 1000 0 | 0.18 0.06 22.09 0.06
6 1.45e+06 2871.91 | 2746.69 0.0 1000 0 | 0.18 0.06 21.86 0.06
6 1.46e+06 2871.91 | 2637.90 0.0 1000 0 | 0.18 0.06 22.09 0.06
6 1.48e+06 2871.91 | 2712.50 0.0 1000 0 | 0.18 0.05 22.10 0.06
6 1.49e+06 2871.91 | 2703.42 0.0 1000 0 | 0.18 0.06 22.62 0.06
6 1.50e+06 2871.91 | 2767.33 0.0 1000 0 | 0.17 0.05 21.92 0.06
6 1.51e+06 2919.71 |
6 1.51e+06 2919.71 | 2919.71 4.6 1000 0 | 0.18 0.06 22.39 0.06
6 1.52e+06 2919.71 | 2878.41 0.0 1000 0 | 0.18 0.06 22.34 0.06
6 1.54e+06 2919.71 | 2706.40 0.0 1000 0 | 0.19 0.06 22.33 0.05
6 1.55e+06 2919.71 | 2766.00 0.0 1000 0 | 0.19 0.05 22.25 0.05
6 1.56e+06 2919.71 | 2686.77 0.0 1000 0 | 0.19 0.05 22.11 0.05
6 1.57e+06 2919.71 | 2806.31 0.0 1000 0 | 0.19 0.05 22.06 0.05
6 1.58e+06 2919.71 | 2806.25 0.0 1000 0 | 0.18 0.05 21.52 0.05
6 1.59e+06 2919.71 | 2851.65 0.0 1000 0 | 0.19 0.05 21.15 0.05
6 1.60e+06 2919.71 | 2799.51 0.0 1000 0 | 0.18 0.05 21.29 0.05
6 1.61e+06 2919.71 | 2853.32 0.0 1000 0 | 0.18 0.05 21.43 0.05
6 1.62e+06 2919.71 | 2776.67 0.0 1000 0 | 0.18 0.05 21.82 0.05
6 1.63e+06 2919.71 | 2846.76 0.0 1000 0 | 0.18 0.05 21.45 0.05
6 1.64e+06 2919.71 | 2792.80 0.0 1000 0 | 0.19 0.05 21.70 0.05
6 1.65e+06 2919.71 | 2827.58 0.0 1000 0 | 0.19 0.05 21.97 0.05
6 1.66e+06 2919.71 | 2784.43 0.0 1000 0 | 0.19 0.05 21.33 0.05
6 1.67e+06 2919.71 | 2737.03 0.0 1000 0 | 0.19 0.04 21.52 0.05
6 1.68e+06 2919.71 | 2812.33 0.0 1000 0 | 0.18 0.04 21.24 0.05
6 1.69e+06 2919.71 | 2788.36 0.0 1000 0 | 0.18 0.05 21.54 0.05
6 1.70e+06 2919.71 | 2786.58 0.0 1000 0 | 0.19 0.04 21.31 0.05
6 1.71e+06 2919.71 | 2742.50 0.0 1000 0 | 0.18 0.04 21.51 0.05
6 1.72e+06 2919.71 | 2720.79 0.0 1000 0 | 0.19 0.05 21.70 0.05
6 1.73e+06 2919.71 | 2786.67 0.0 1000 0 | 0.18 0.04 21.73 0.05
6 1.74e+06 2919.71 | 2843.51 0.0 1000 0 | 0.19 0.05 21.66 0.05
6 1.75e+06 2919.71 | 2808.91 0.0 1000 0 | 0.19 0.05 21.34 0.05
6 1.76e+06 2919.71 | 2796.23 0.0 1000 0 | 0.19 0.04 21.98 0.05
6 1.78e+06 2919.71 | 2887.21 0.0 1000 0 | 0.19 0.05 21.69 0.05
6 1.79e+06 2919.71 | 2779.83 0.0 1000 0 | 0.18 0.05 21.79 0.05
6 1.80e+06 2919.71 | 2910.02 0.0 1000 0 | 0.19 0.05 21.81 0.05
6 1.81e+06 2978.96 |
6 1.81e+06 2978.96 | 2978.96 0.6 1000 0 | 0.19 0.04 22.15 0.05
6 1.82e+06 2990.69 |
6 1.82e+06 2990.69 | 2990.69 4.2 1000 | |
= kE2BT2+2
kY1 = kE2BT2+3
kY2 = kE2BT2+4
kY3 = kE2BT2+5
kY4 = kE2BT2+6
kY5 = kE2BT2+7
kY6 = kE2BT2+8
kY7 = kE2BT2+9
kY8 = kE2BT2+10
kY9 = kE2BT2+11
kY0 = kE2BT2+12
kX1T = kE2BT2+13
kY1T = kE2BT2+14
kX2T = kE2BT2+15
kY2T = kE2BT2+16
kX3T = kE2BT2+17
kY3T = kE2BT2+18
kX4T = kE2BT2+19
kY4T = kE2BT2+20
kX5T = kE2BT2+21
kY5T = kE2BT2+22
kX6T = kE2BT2+23
kY6T = kE2BT2+24
kR1 = kE2BT2+25
kR2 = kE2BT2+26
kR3 = kE2BT2+27
kR4 = kE2BT2+28
kR5 = kE2BT2+29
kR6 = kE2BT2+30
kGDB4 = kE2BT2+31
kGDB5 = kE2BT2+32
kGDB6 = kE2BT2+33
kPic4 = kE2BT2+34
kPic5 = kE2BT2+35
kPic6 = kE2BT2+36
kGDB7 = kE2BT2+37
kGDB8 = kE2BT2+38
kGDB9 = kE2BT2+39
kGDB0 = kE2BT2+40
kPic7 = kE2BT2+41
kPic8 = kE2BT2+42
kPic9 = kE2BT2+43
kPic0 = kE2BT2+44
kStatN = kE2BT2+45
kXMean = kE2BT2+46
kConj = kE2BT2+47
kReal = kE2BT2+48
kFAngle = kE2BT2+49
kLCM = kE2BT2+50
kGCD = kE2BT2+51
kRandInt = kE2BT2+52
kRandNorm = kE2BT2+53
kToPolar = kE2BT2+54
kToRect = kE2BT2+55
kYMean = kE2BT2+56
kStdX = kE2BT2+57
kStdX1 = kE2BT2+58
kw0 = kE2BT2+59
kMatF = kE2BT2+60
kMatG = kE2BT2+61
kMatRH = kE2BT2+62
kMatI = kE2BT2+63
kMatJ = kE2BT2+64
kYMean1 = kE2BT2+65
kStdY = kE2BT2+66
kStdY1 = kE2BT2+67
kMatToLst = kE2BT2+68
kLstToMat = kE2BT2+69
kCumSum = kE2BT2+70
kDeltaLst = kE2BT2+71
kStdDev = kE2BT2+72
kVariance = kE2BT2+73
kLength = kE2BT2+74
kEquToStrng = kE2BT2+75
kStrngToEqu = kE2BT2+76
kExpr = kE2BT2+77
kSubStrng = kE2BT2+78
kInStrng = kE2BT2+79
kStr1 = kE2BT2+80
kStr2 = kE2BT2+81
kStr3 = kE2BT2+82
kStr4 = kE2BT2+83
kStr5 = kE2BT2+84
kStr6 = kE2BT2+85
kStr7 = kE2BT2+86
kStr8 = kE2BT2+87
kStr9 = kE2BT2+88
kStr0 = kE2BT2+89
kFinN = kE2BT2+90
kFinI = kE2BT2+91
kFinPV = kE2BT2+92
kFinPMT = kE2BT2+93
kFinFV = kE2BT2+94
kFinPY = kE2BT2+95
kFinCY = kE2BT2+96
kFinFPMT = kE2BT2+97
kFinFI = kE2BT2+98
kFinFPV = kE2BT2+99
kFinFN = kE2BT2+100
kFinFFV = kE2BT2+101
kFinNPV = kE2BT2+102
kFinIRR = kE2BT2+103
kFinBAL = kE2BT2+104
kFinPRN = kE2BT2+105
kFinINT = kE2BT2+106
kSumX = kE2BT2+107
kSumX2 = kE2BT2+108
kFinToNom = kE2BT2+109
kFinToEff = kE2BT2+110
kFinDBD = kE2BT2+111
kStatVP = kE2BT2+112
kStatZ = kE2BT2+113
kStatT = kE2BT2+114
kStatChi = kE2BT2+115
kStatF = kE2BT2+116
kStatDF = kE2BT2+117
kStatPhat = kE2BT2+118
kStatPhat1 = kE2BT2+119
kStatPhat2 = kE2BT2+120
kStatMeanX1 = kE2BT2+121
kStatMeanX2 = kE2BT2+122
kStatStdX1 = kE2BT2+123
kStatStdX2 = kE2BT2+124
kStatStdXP = kE2BT2+125
kStatN1 = kE2BT2+126
kStatN2 = kE2BT2+127
kStatLower = kE2BT2+128
kStatUpper = kE2BT2+129
kuw0 = kE2BT2+130
kImag = kE2BT2+131
kSumY = kE2BT2+132
kXres = kE2BT2+133
kStat_s = kE2BT2+134
kSumY2 = kE2BT2+135
kSumXY = kE2BT2+136
kuXres = kE2BT2+137
kModBox = kE2BT2+138
kNormProb = kE2BT2+139
kNormalPDF = kE2BT2+140
kTPDF = kE2BT2+141
kChiPDF = kE2BT2+142
kFPDF = kE2BT2+143
kMinY = kE2BT2+144 # MinY
kRandBin = kE2BT2+145
kRef = kE2BT2+146
kRRef = kE2BT2+147
kLRSqr = kE2BT2+148
kBRSqr = kE2BT2+149
kDiagOn = kE2BT2+150
kDiagOff = kE2BT2+151
kun1 = kE2BT2+152 # FOR RCL USE WHEN GOTTEN FROM 82
kvn1 = kE2BT2+153 # FOR RCL USE WHEN GOTTEN FROM 82
#
k83_00End = kvn1 #end of original keys...
kArchive = k83_00End + 1
kUnarchive = k83_00End + 2
kAsm = k83_00End + 3 # Asm(
kAsmPrgm = k83_00End + 4 # AsmPrgm
kAsmComp = k83_00End + 5 # AsmComp(
#
kcapAAcute = k83_00End + 6
kcapAGrave = k83_00End + 7
kcapACaret = k83_00End + 8
kcapADier = k83_00End + 9
kaAcute = k83_00End + 10
kaGrave = k83_00End + 11
kaCaret = k83_00End + 12
kaDier = k83_00End + 13
kcapEAcute = k83_00End + 14
kcapEGrave = k83_00End + 15
kcapECaret = k83_00End + 16
kcapEDier = k83_00End + 17
keAcute = k83_00End + 18
keGrave = k83_00End + 19
keCaret = k83_00End + 20
keDier = k83_00End + 21
kcapIAcute = k83_00End + 22
kcapIGrave = k83_00End + 23
kcapICaret = k83_00End + 24
kcapIDier = k83_00End + 25
kiAcute = k83_00End + 26
kiGrave = k83_00End + 27
kiCaret = k83_00End + 28
kiDier = k83_00End + 29
kcapOAcute = k83_00End + 30
kcapOGrave = k83_00End + 31
kcapOCaret = k83_00End + 32
kcapODier = k83_00End + 33
koAcute = k83_00End + 34
koGrave = k83_00End + 35
koCaret = k83_00End + 36
koDier = k83_00End + 37
kcapUAcute = k83_00End + 38
kcapUGrave = k83_00End + 39
kcapUCaret = k83_00End + 40
kcapUDier = k83_00End + 41
kuAcute = k83_00End + 42
kuGrave = k83_00End + 43
kuCaret = k83_00End + 44
kuDier = k83_00End + 45
kcapCCed = k83_00End + 46
kcCed = k83_00End + 47
kcapNTilde = k83_00End + 48
knTilde = k83_00End + 49
kaccent = k83_00End + 50
kgrave = k83_00End + 51
kdieresis = k83_00End + 52
kquesDown = k83_00End + 53
kexclamDown = k83_00End + 54
kalpha = k83_00End + 55
kbeta = k83_00End + 56
kgamma = k83_00End + 57
kcapDelta = k83_00End + 58
kdelta = k83_00End + 59
kepsilon = k83_00End + 60
klambda = k83_00End + 61
kmu = k83_00End + 62
kpi2 = k83_00End + 63
krho = k83_00End + 64
kcapSigma = k83_00End + 65
ksigma = k83_00End + 66
ktau = k83_00End + 67
kphi = k83_00End + 68
kcapOmega = k83_00End + 69
kphat = k83_00End + 70
kchi2 = k83_00End + 71
kstatF2 = k83_00End + 72
kLa = k83_00End + 73
kLb = k83_00End + 74
kLc = k83_00End + 75
kLd = k83_00End + 76
kLe = k83_00End + 77
kLf = k83_00End + 78
kLg = k83_00End + 79
kLh = k83_00End + 80
kLi = k83_00End + 81
kLj = k83_00End + 82
kLk = k83_00End + 83
kLl = k83_00End + 84
kLm = k83_00End + 85
kLsmalln = k83_00End + 86
kLo = k83_00End + 87
kLp = k83_00End + 88
kLq = k83_00End + 89
kLsmallr = k83_00End + 90
kLs = k83_00End + 91
kLt = k83_00End + 92
kLu = k83_00End + 93
kLv = k83_00End + 94
kLw = k83_00End + 95
kLx = k83_00End + 96
kLy = k83_00End + 97
kLz = k83_00End + 98
kGarbageC = k83_00End + 99 # GarbageCollect
#
kE2BT2_End = kGarbageC
# the following keys were added in OS version 1.15
KE2BT3 = 0
#
kReserved = KE2BT3+1 #01 - 001d
kAtSign = KE2BT3+2 #02 - 002d
kPound = KE2BT3+3 #03 - 003d
kDollar = KE2BT3+4 #04 - 004d
kAmpersand = KE2BT3+5 #05 - 005d
kBackQuote = KE2BT3+6 #06 - 006d
kSemicolon = KE2BT3+7 #07 - 007d
kBackSlash = KE2BT3+8 #08 - 008d
kVertSlash = KE2BT3+9 #09 - 009d
kUnderscore = KE2BT3+10 #0A - 010d
kTilde = KE2BT3+11 #0B - 011d
kPercent = KE2BT3+12 #0C - 012d
kLastUsedK3 = kPercent
kTab = KE2BT3+13 #0D - 013d
kShftTaB = KE2BT3+14 #0E - 014d
kShftDel = KE2BT3+15 #0F - 015d
kShftBack = KE2BT3+16 #10 - 016d
kShftPgUp = KE2BT3+17 #11 - 017d
kShftPgDn = KE2BT3+18 #12 - 018d
kShftLeft = KE2BT3+19 #13 - 019d
kShftRight = KE2BT3+20 #14 - 020d
kShftUp = KE2BT3+21 #15 - 021d
kShftDn = KE2BT3+22 #16 - 022d
#
kDiamond = KE2BT3+23
#
kDiaAdd = kDiamond+0 #17 - 023d
kDiaSub = kDiamond+1 #18 - 024d
kDiaTilde = kDiamond+2 #19 - 025d
kDiaDiv = kDiamond+3 #1A - 026d
kDiaBkSlash = kDiamond+4 #1B - 027d
kDiaColon = kDiamond+5 #1C - 028d
kDiaQuote = kDiamond+6 #1D - 029d
kDiaLBrack = kDiamond+7 #1E - 030d
kDiaRBrack = kDiamond+8 #1F - 031d
kDiaBkSpace = kDiamond+9 #20 - 032d
kDiaEnter = kDiamond+10 #21 - 033d
kDiaComma = kDiamond+11 #22 - 034d
kDiaDel = kDiamond+12 #23 - 035d
kDiaDecPnt = kDiamond+13 #24 - 036d
kDia0 = kDiamond+14 #25 - 037d
kDia1 = kDiamond+15 #26 - 038d
kDia2 = kDiamond+16 #27 - 039d
kDia3 = kDiamond+17 #28 - 040d
kDia4 = kDiamond+18 #29 - 041d
kDia5 = kDiamond+19 #2A - 042d
kDia6 = kDiamond+20 #2B - 043d
kDia7 = kDiamond+21 #2C - 044d
kDia8 = kDiamond+22 #2D - 045d
kDia9 = kDiamond+23 #2E - 046d
kDiaTab = kDiamond+24 #2F - 047d
kDiaSpace = kDiamond+25 #30 - 048d
kDiaA = kDiamond+26 #31 - 049d
kDiaB = kDiamond+27 #32 - 050d
kDiaC = kDiamond+28 #33 - 051d
kDiaD = kDiamond+29 #34 - 052d
kDiaE = kDiamond+30 #35 - 053d
kDiaF = kDiamond+31 #36 - 054d
kDiaG = kDiamond+32 #37 - 055d
kDiaH = kDiamond+33 #38 - 056d
kDiaI = kDiamond+34 #39 - 057d
kDiaJ = kDiamond+35 #3A - 058d
kDiaK = kDiamond+36 #3B - 059d
kDiaL = kDiamond+37 #3C - 060d
kDiaM = kDiamond+38 #3D - 061d
kDiaN = kDiamond+39 #3E - 062d
kDiaO = kDiamond+40 #3F - 063d
kDiaP = kDiamond+41 #40 - 064d
kDiaQ = kDiamond+42 #41 - 065d
kDiaR = kDiamond+43 #42 - 066d
kDiaS = kDiamond+44 #43 - 067d
kDiaT = kDiamond+45 #44 - 068d
kDiaU = kDiamond+46 #45 - 069d
kDiaV = kDiamond+47 #46 - 070d
kDiaW = kDiamond+48 #47 - 071d
kDiaX = kDiamond+49 #48 - 072d
kDiaY = kDiamond+50 #49 - 073d
kDiaZ = kDiamond+51 #4A - 074d
kDiaPgUp = kDiamond+52 #4B - 075d
kDiaPgDn = kDiamond+53 #4C - 076d
kDiaLeft = kDiamond+54 #4D - 077d
kDiaRight = kDiamond+55 #4E - 078d
kDiaUp = kDiamond+56 #4F - 079d
kDiaDn = kDiamond+57 #50 - 080d
#
kdbSquare = kDiamond+58
#
kSqrAdd = kdbSquare+0 #51 - 081d
kSqrSub = kdbSquare+1 #52 - 082d
kSqrTilde = kdbSquare+2 #53 - 083d
kSqrDiv = kdbSquare+3 #54 - 084d
kSqrBkSlash = kdbSquare+4 #55 - 085d
kSqrColon = kdbSquare+5 #56 - 086d
kSqrQuote = kdbSquare+6 #57 - 087d
kSqrLBrack = kdbSquare+7 #58 - 088d
kSqrRBrack = kdbSquare+8 #59 - 089d
kSqrBkSpace = kdbSquare+9 #5A - 090d
kSqrEnter = kdbSquare+10 #5B - 091d
kSqrComma = kdbSquare+11 #5C - 092d
kSqrDel = kdbSquare+12 #5D - 093d
kSqrDecPnt = kdbSquare+13 #5E - 094d
kSqr0 = kdbSquare+14 #5F - 095d
kSqr1 = kdbSquare+15 #60 - 096d
kSqr2 = kdbSquare+16 #61 - 097d
kSqr3 = kdbSquare+17 #62 - 098d
kSqr4 = kdbSquare+18 #63 - 099d
kSqr5 = kdbSquare+19 #64 - 100d
kSqr6 = kdbSquare+20 #65 - 101d
kSqr7 = kdbSquare+21 #66 - 102d
kSqr8 = kdbSquare+22 #67 - 103d
kSqr9 = kdbSquare+23 #68 - 104d
kSqrTab = kdbSquare+24 #69 - 105d
kSqrSpace = kdbSquare+25 #6A - | |
<reponame>albinger/Adafruit_Learning_System_Guides<filename>PyPortal_EZ_Make_Oven/code.py
# SPDX-FileCopyrightText: 2019 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import json
import array
import math
import gc
import board
import busio
import audioio
import audiocore
import displayio
import digitalio
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text import bitmap_label as label
from adafruit_display_shapes.circle import Circle
from adafruit_button import Button
import adafruit_touchscreen
from adafruit_mcp9600 import MCP9600
TITLE = "EZ Make Oven Controller"
VERSION = "1.3.2"
print(TITLE, "version ", VERSION)
time.sleep(2)
display_group = displayio.Group()
board.DISPLAY.show(display_group)
PROFILE_SIZE = 2 # plot thickness
GRID_SIZE = 2
GRID_STYLE = 3
TEMP_SIZE = 2
AXIS_SIZE = 2
BLACK = 0x0
BLUE = 0x2020FF
GREEN = 0x00FF55
RED = 0xFF0000
YELLOW = 0xFFFF00
WIDTH = board.DISPLAY.width
HEIGHT = board.DISPLAY.height
palette = displayio.Palette(5)
palette[0] = BLACK
palette[1] = GREEN
palette[2] = BLUE
palette[3] = RED
palette[4] = YELLOW
palette.make_transparent(0)
BACKGROUND_COLOR = 0
PROFILE_COLOR = 1
GRID_COLOR = 2
TEMP_COLOR = 3
AXIS_COLOR = 2
GXSTART = 100
GYSTART = 80
GWIDTH = WIDTH - GXSTART
GHEIGHT = HEIGHT - GYSTART
plot = displayio.Bitmap(GWIDTH, GHEIGHT, 4)
display_group.append(
displayio.TileGrid(plot, pixel_shader=palette, x=GXSTART, y=GYSTART)
)
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_XL,
board.TOUCH_XR,
board.TOUCH_YD,
board.TOUCH_YU,
calibration=((5200, 59000), (5800, 57000)),
size=(WIDTH, HEIGHT),
)
class Beep(object):
    """Speaker beeper that plays a fixed 440 Hz sine tone.

    Beeps of 0.5 s or less block inside :meth:`play`; longer beeps are
    ended asynchronously by calling :meth:`refresh` from the main loop.
    """

    def __init__(self):
        self.duration = 0
        self.start = 0
        volume = 1  # volume is from 0.0 to 1.0
        freq = 440  # Set this to the Hz of the tone you want to generate.
        n_samples = 4000 // freq
        # One full sine period, biased into the unsigned 16-bit range.
        wave = array.array(
            "H",
            [
                int((1 + math.sin(math.pi * 2 * step / n_samples)) * volume * (2 ** 15 - 1))
                for step in range(n_samples)
            ],
        )
        self.sine_wave_sample = audiocore.RawSample(wave)
        self._speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
        self._speaker_enable.switch_to_output(False)
        if hasattr(board, "AUDIO_OUT"):
            self.audio = audioio.AudioOut(board.AUDIO_OUT)
        elif hasattr(board, "SPEAKER"):
            self.audio = audioio.AudioOut(board.SPEAKER)
        else:
            raise AttributeError("Board does not have a builtin speaker!")

    # pylint: disable=protected-access
    def play(self, duration=0.1):
        """Start the tone for *duration* seconds; ignore if already playing."""
        if self._speaker_enable.value:
            return
        self._speaker_enable.value = True
        self.audio.play(self.sine_wave_sample, loop=True)
        self.start = time.monotonic()
        self.duration = duration
        if duration <= 0.5:
            # for beeps less than .5 sec, sleep here,
            # otherwise, use refresh() in loop to turn off long beep
            time.sleep(duration)
            self.stop()

    def stop(self):
        """Silence the speaker if it is currently playing."""
        if not self._speaker_enable.value:
            return
        self.duration = 0
        self.audio.stop()
        self._speaker_enable.value = False

    def refresh(self):
        """End a long-running beep once its duration has elapsed."""
        if time.monotonic() - self.start >= self.duration:
            self.stop()
class ReflowOvenControl(object):
    """State machine that drives a reflow-oven heating element from a
    solder profile.

    Loads ``/config.json`` and a profile JSON from ``/profiles/`` at
    construction, samples an MCP9600 thermocouple over I2C, and toggles
    the oven's power pin so the measured temperature tracks the profile.

    NOTE(review): check_state() also reads/writes module-level globals
    (``oven``, ``message``, ``sgraph``, ``timer_data``, ``timediff``,
    ``draw_profile``, ``format_time``) defined elsewhere in this file,
    so this class is not usable standalone.
    """

    # Legal values of self.state, in rough lifecycle order.
    states = ("wait", "ready", "start", "preheat", "soak", "reflow", "cool")

    def __init__(self, pin):
        # Digital output that switches the heating element on/off.
        self.oven = digitalio.DigitalInOut(pin)
        self.oven.direction = digitalio.Direction.OUTPUT
        with open("/config.json", mode="r") as fpr:
            self.config = json.load(fpr)
            fpr.close()  # redundant: the with-block closes the file anyway
        self.sensor_status = False
        with open("/profiles/" + self.config["profile"] + ".json", mode="r") as fpr:
            self.sprofile = json.load(fpr)
            fpr.close()  # redundant: the with-block closes the file anyway
        i2c = busio.I2C(board.SCL, board.SDA, frequency=100000)
        try:
            # Type-"K" thermocouple at the configured I2C address.
            self.sensor = MCP9600(i2c, self.config["sensor_address"], "K")
            self.ontemp = self.sensor.temperature
            self.offtemp = self.ontemp
            self.sensor_status = True
        except ValueError:
            # Sensor missing; check_state() falls back to a fixed 32 C.
            print("temperature sensor not available")
        self.control = False
        self.reset()
        self.beep = Beep()
        self.set_state("ready")
        if self.sensor_status:
            if self.sensor.temperature >= 50:
                # Oven still hot from a previous run: force a cool-down wait.
                self.last_state = "wait"
                self.set_state("wait")

    def reset(self):
        """Clear on/off timestamps and turn the heating element off."""
        self.ontime = 0
        self.offtime = 0
        self.enable(False)
        self.reflow_start = 0

    def get_profile_temp(self, seconds):
        """Return the profile's target temperature at *seconds*, linearly
        interpolated between profile points; 0 when past the last point."""
        x1 = self.sprofile["profile"][0][0]
        y1 = self.sprofile["profile"][0][1]
        for point in self.sprofile["profile"]:
            x2 = point[0]
            y2 = point[1]
            if x1 <= seconds < x2:
                # Integer linear interpolation between (x1,y1) and (x2,y2).
                temp = y1 + (y2 - y1) * (seconds - x1) // (x2 - x1)
                return temp
            x1 = x2
            y1 = y2
        return 0

    def set_state(self, state):
        """Transition to *state*, run one control pass, then record it as
        last_state so the next pass can detect the change (and beep)."""
        self.state = state
        self.check_state()
        self.last_state = state

    # pylint: disable=too-many-branches, too-many-statements
    def check_state(self):
        """One pass of the state machine + bang-bang temperature control."""
        try:
            temp = self.sensor.temperature
        except AttributeError:
            temp = 32  # sensor not available, use 32 for testing
            self.sensor_status = False
            # message.text = "Temperature sensor missing"
        self.beep.refresh()  # ends a long beep whose duration has elapsed
        if self.state == "wait":
            self.enable(False)
            if self.state != self.last_state:
                # change in status, time for a beep!
                self.beep.play(0.1)
            if temp < 35:
                # Cooled down enough: re-arm the UI for a new run.
                self.set_state("ready")
                oven.reset()
                draw_profile(sgraph, oven.sprofile)
                timer_data.text = format_time(0)
        if self.state == "ready":
            self.enable(False)
        if self.state == "start" and temp >= 50:
            self.set_state("preheat")
        if self.state == "start":
            message.text = "Starting"
            self.enable(True)
        # Stage transitions fire when temp crosses the stage's threshold.
        if self.state == "preheat" and temp >= self.sprofile["stages"]["soak"][1]:
            self.set_state("soak")
        if self.state == "preheat":
            message.text = "Preheat"
        if self.state == "soak" and temp >= self.sprofile["stages"]["reflow"][1]:
            self.set_state("reflow")
        if self.state == "soak":
            message.text = "Soak"
        if (
            self.state == "reflow"
            and temp >= self.sprofile["stages"]["cool"][1]
            and self.reflow_start > 0
            and (
                # Reflow must also have lasted the profile's reflow window.
                time.monotonic() - self.reflow_start
                >= self.sprofile["stages"]["cool"][0]
                - self.sprofile["stages"]["reflow"][0]
            )
        ):
            self.set_state("cool")
            self.beep.play(5)  # long beep; ended later by refresh()
        if self.state == "reflow":
            message.text = "Reflow"
            if self.last_state != "reflow":
                self.reflow_start = time.monotonic()
        if self.state == "cool":
            self.enable(False)
            message.text = "Cool Down, Open Door"
        if self.state in ("start", "preheat", "soak", "reflow"):
            if self.state != self.last_state:
                # change in status, time for a beep!
                self.beep.play(0.1)
            # oven temp control here
            # check range of calibration to catch any humps in the graph
            checktime = 0
            checktimemax = self.config["calibrate_seconds"]
            checkoven = False
            if not self.control:
                # Oven currently off: shrink the look-ahead window by how
                # long it has already been off.
                checktimemax = max(
                    0,
                    self.config["calibrate_seconds"]
                    - (time.monotonic() - self.offtime),
                )
            while checktime <= checktimemax:
                # NOTE(review): `timediff` is a module-level elapsed-seconds
                # value maintained by the main loop -- confirm against caller.
                # Also: if checktimemax is 0 the division below raises
                # ZeroDivisionError -- presumably prevented by config values.
                check_temp = self.get_profile_temp(int(timediff + checktime))
                if (
                    temp + self.config["calibrate_temp"] * checktime / checktimemax
                    < check_temp
                ):
                    checkoven = True
                    break
                checktime += 5
            if not checkoven:
                # hold oven temperature
                if (
                    self.state in ("start", "preheat", "soak")
                    and self.offtemp > self.sensor.temperature
                ):
                    checkoven = True
            self.enable(checkoven)

    # turn oven on or off
    def enable(self, enable):
        """Switch the heating element and record the on/off time and temp."""
        try:
            self.oven.value = enable
            self.control = enable
            if enable:
                self.offtime = 0
                self.ontime = time.monotonic()
                self.ontemp = self.sensor.temperature
                print("oven on")
            else:
                self.offtime = time.monotonic()
                self.ontime = 0
                self.offtemp = self.sensor.temperature
                print("oven off")
        except AttributeError:
            # bad sensor
            pass
class Graph(object):
    """Maps graph coordinates (seconds, degrees C) onto the module-level
    ``plot`` bitmap and draws points and (optionally dashed) lines."""

    def __init__(self):
        # Graph-coordinate extents mapped onto the bitmap.
        self.xmin = 0
        self.xmax = 720  # graph up to 12 minutes
        self.ymin = 0
        self.ymax = 240
        # Screen-space origin and size of the plot area.
        self.xstart = 0
        self.ystart = 0
        self.width = GWIDTH
        self.height = GHEIGHT

    # pylint: disable=too-many-branches
    def draw_line(self, x1, y1, x2, y2, size=PROFILE_SIZE, color=1, style=1):
        """Draw a line between graph coords (x1,y1)-(x2,y2).

        style: 1 = solid, 2/3/4 = dashed (plot every 2nd/8th/12th pixel).
        """
        # print("draw_line:", x1, y1, x2, y2)
        # convert graph coords to screen coords
        x1p = self.xstart + self.width * (x1 - self.xmin) // (self.xmax - self.xmin)
        y1p = self.ystart + int(
            self.height * (y1 - self.ymin) / (self.ymax - self.ymin)
        )
        x2p = self.xstart + self.width * (x2 - self.xmin) // (self.xmax - self.xmin)
        y2p = self.ystart + int(
            self.height * (y2 - self.ymin) / (self.ymax - self.ymin)
        )
        # print("screen coords:", x1p, y1p, x2p, y2p)
        # Step along the longer axis so the line has no gaps.
        if (max(x1p, x2p) - min(x1p, x2p)) > (max(y1p, y2p) - min(y1p, y2p)):
            for xx in range(min(x1p, x2p), max(x1p, x2p)):
                if x2p != x1p:
                    yy = y1p + (y2p - y1p) * (xx - x1p) // (x2p - x1p)
                    if style == 2:
                        if xx % 2 == 0:
                            self.draw_point(xx, yy, size, color)
                    elif style == 3:
                        if xx % 8 == 0:
                            self.draw_point(xx, yy, size, color)
                    elif style == 4:
                        if xx % 12 == 0:
                            self.draw_point(xx, yy, size, color)
                    else:
                        self.draw_point(xx, yy, size, color)
        else:
            for yy in range(min(y1p, y2p), max(y1p, y2p)):
                if y2p != y1p:
                    xx = x1p + (x2p - x1p) * (yy - y1p) // (y2p - y1p)
                    if style == 2:
                        if yy % 2 == 0:
                            self.draw_point(xx, yy, size, color)
                    elif style == 3:
                        if yy % 8 == 0:
                            self.draw_point(xx, yy, size, color)
                    elif style == 4:
                        if yy % 12 == 0:
                            self.draw_point(xx, yy, size, color)
                    else:
                        self.draw_point(xx, yy, size, color)

    def draw_graph_point(self, x, y, size=PROFILE_SIZE, color=1):
        """ draw point using graph coordinates """
        # wrap around graph point when x goes out of bounds
        x = (x - self.xmin) % (self.xmax - self.xmin) + self.xmin
        xx = self.xstart + self.width * (x - self.xmin) // (self.xmax - self.xmin)
        yy = self.ystart + int(self.height * (y - self.ymin) / (self.ymax - self.ymin))
        print("graph point:", x, y, xx, yy)  # debug trace on every point
        # Clamp yy so the marker never pokes above the top edge.
        self.draw_point(xx, max(0 + size, yy), size, color)

    def draw_point(self, x, y, size=PROFILE_SIZE, color=1):
        """Draw data point on to the plot bitmap at (x,y)."""
        if y is None:
            return
        offset = size // 2
        # Fill a (size x size) square centered on (x, y), skipping pixels
        # outside the plot area.
        for xx in range(x - offset, x + offset + 1):
            if xx in range(self.xstart, self.xstart + self.width):
                for yy in range(y - offset, y + offset + 1):
                    if yy in range(self.ystart, self.ystart + self.height):
                        try:
                            # Flip vertically: bitmap row 0 is the top, but
                            # graph y grows upward.
                            yy = GHEIGHT - yy
                            plot[xx, yy] = color
                        except IndexError:
                            # flipped yy can land exactly on GHEIGHT; ignore
                            pass
def draw_profile(graph, profile):
"""Update the display with current info."""
for i in range(GWIDTH * GHEIGHT):
plot[i] = 0
# draw stage lines
# preheat
graph.draw_line(
profile["stages"]["preheat"][0],
profile["temp_range"][0],
profile["stages"]["preheat"][0],
profile["temp_range"][1] * 1.1,
GRID_SIZE,
GRID_COLOR,
GRID_STYLE,
)
graph.draw_line(
profile["time_range"][0],
profile["stages"]["preheat"][1],
profile["time_range"][1],
profile["stages"]["preheat"][1],
GRID_SIZE,
GRID_COLOR,
GRID_STYLE,
)
# soak
graph.draw_line(
profile["stages"]["soak"][0],
profile["temp_range"][0],
profile["stages"]["soak"][0],
profile["temp_range"][1] * 1.1,
GRID_SIZE,
GRID_COLOR,
GRID_STYLE,
)
graph.draw_line(
profile["time_range"][0],
profile["stages"]["soak"][1],
profile["time_range"][1],
profile["stages"]["soak"][1],
GRID_SIZE,
GRID_COLOR,
GRID_STYLE,
)
# reflow
graph.draw_line(
profile["stages"]["reflow"][0],
profile["temp_range"][0],
profile["stages"]["reflow"][0],
| |
<reponame>KrishanBhasin/giraffez
# -*- coding: utf-8 -*-
#
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import struct
import threading
from ._teradatapt import EncoderError, InvalidCredentialsError, MLoad, TeradataError as TeradataPTError
from .constants import *
from .errors import *
from .cmd import TeradataCmd
from .connection import Connection, Context
from .encoders import DateHandler, null_handler
from .fmt import format_table
from .io import ArchiveFileReader, CSVReader, FileReader, JSONReader, Reader
from .logging import log
from .utils import get_version_info, pipeline, suppress_context
from ._compat import *
# Public API of this module.
__all__ = ['BulkLoad']

# Aggregates the MLoad acquisition-error work table ({table}_e1) by error
# code and field, joining dbc.errormsgs for human-readable text. Format
# with the target table name: error_table_count.format(table_name).
error_table_count = """
select a.errorcode as code, a.errorfield as field, b.errortext as text, count(*) over (partition by a.errorcode, a.errorfield) as errcount
from {0}_e1 a
join dbc.errormsgs b
on a.errorcode = b.errorcode
group by 1,2,3;
"""

# Pulls one sample row of raw hostdata per (errorcode, errorfield) pair
# from the error work table, for troubleshooting rejected rows.
error_table_sample = """
select
a.errorcode as code,
a.errorfield as field,
min(substr(a.hostdata, 0, 30000)) as hostdata
from {0}_e1 a
qualify row_number() over (partition by a.errorcode, a.errorfield order by a.errorcode asc)=1
group by 1,2;
"""
class TeradataBulkLoad(Connection):
"""
The class for using the TPT API's UPDATE (MLoad) driver to insert a
large (> ~100k rows) amount of data into an existing Teradata table.
Exposed under the alias :class:`giraffez.BulkLoad`.
:param str table: The name of the target table for loading.
:param str host: Omit to read from :code:`~/.girafferc` configuration file.
:param str username: Omit to read from :code:`~/.girafferc` configuration file.
:param str password: Omit to read from :code:`~/.girafferc` configuration file.
:param int log_level: Specify the desired level of output from the job.
Possible values are :code:`giraffez.SILENCE`, :code:`giraffez.INFO` (default),
:code:`giraffez.VERBOSE`, and :code:`giraffez.DEBUG`
:param str config: Specify an alternate configuration file to be read from,
when previous paramaters are omitted.
:param str key_file: Specify an alternate key file to use for configuration decryption
:param string dsn: Specify a connection name from the configuration file to be
used, in place of the default.
:param bool protect: If authentication with Teradata fails and :code:`protect` is :code:`True`,
locks the connection used in the configuration file. This can be unlocked using the
command :code:`giraffez config --unlock <connection>`, changing the connection password,
or via the :meth:`~giraffez.config.Config.unlock_connection` method.
:param bool coerce_floats: Coerce Teradata decimal types into Python floats
:param bool cleanup: Attempt to cleanup all work tables
when context exits.
:param bool print_error_table: Prints a user-friendly version of the mload
error table to stderr.
:raises `giraffez.errors.InvalidCredentialsError`: if the supplied credentials are incorrect
:raises `giraffez.TeradataPTError`: if the connection cannot be established
If the target table is currently under an MLoad lock (such as if the
previous operation failed), a :code:`release mload` statement will be
executed on the table, and the load job will be re-attempted.
Meant to be used, where possible, with python's :code:`with` context handler
to guarantee that connections will be closed gracefully when operation
is complete.
"""
checkpoint_interval = 50000
def __init__(self, table=None, host=None, username=None, password=<PASSWORD>,
log_level=INFO, config=None, key_file=None, dsn=None, protect=False,
coerce_floats=False, cleanup=False, print_error_table=False):
super(TeradataBulkLoad, self).__init__(host, username, password, log_level, config, key_file,
dsn, protect)
# Attributes used with property getter/setters
self._columns = None
self._table_name = None
self.initiated = False
self.finished = False
self.coerce_floats = coerce_floats
self.perform_cleanup = cleanup
self.exit_code = None
self.applied_count = 0
self.error_count = 0
#: The amount of time spent in idle (waiting for server)
self.idle_time = 0
#: Prints the error table when there is an issue, good for troubleshooting jobs
self.print_error_table = print_error_table
self.preprocessor = lambda s: s
if table is not None:
self.table = table
    def checkpoint(self):
        """
        Execute a checkpoint while loading rows. Called automatically
        when loading from a file. Updates the exit code of the driver to
        reflect errors.
        """
        # Thin pass-through to the underlying TPT MLoad driver.
        return self.mload.checkpoint()
def cleanup(self):
"""
Drops any existing work tables, as returned by
:meth:`~giraffez.load.TeradataBulkLoad.tables`.
:raises `giraffez.TeradataPTError`: if a Teradata error ocurred
"""
threads = []
for i, table in enumerate(filter(lambda x: self.mload.exists(x), self.tables)):
log.info("BulkLoad", "Dropping table '{}'...".format(table))
t = threading.Thread(target=self.mload.drop_table, args=(table,))
threads.append(t)
t.start()
for t in threads:
t.join()
    @property
    def columns(self):
        """
        The list of columns in use.

        :getter: Return the list of columns in use.
        :setter: Set the columns to be loaded into, as well as their order. If
            loading from a file, these will be determined from the file header.
            Not necessary if you are loading into all columns, in the original
            order. The value must be a :code:`list` of names in the order that
            the fields of data will be presented in each row.

            Raises :class:`~giraffez.errors.GiraffeError` if :code:`field_names`
            is not a :code:`list`.

            Raises :class:`~giraffez.errors.GiraffeError` if the target table
            has not been set.
        :type: :class:`~giraffez.types.Columns`
        """
        # None until the setter (or file-header detection) populates it.
        return self._columns
@columns.setter
def columns(self, field_names):
    """Set the column names (and their order) to load into.

    Names are lower-cased; duplicates raise. Membership is tracked with a
    set so validation is O(n) instead of the previous O(n^2) list scan.

    :param list field_names: column names in row-field order
    :raises `giraffez.errors.GiraffeError`: if not a list or on duplicates
    """
    if not isinstance(field_names, list):
        raise GiraffeError("Must set .columns property as type <List>")
    fields = []
    seen = set()
    for field in field_names:
        field = field.lower()
        if field in seen:
            raise GiraffeError("Cannot set duplicate column: '{}'".format(field))
        seen.add(field)
        fields.append(field)
    self._columns = fields
def finish(self):
    """
    Finishes the load job. Called automatically when the connection closes.
    Idempotent: returns the stored exit code if already finished.

    :return: The exit code returned when applying rows to the table
    :raises `giraffez.TeradataPTError`: if the job ends with a non-zero
        exit code (checked both after the checkpoint and after applying
        rows)
    """
    if self.finished:
        return self.exit_code
    # Checkpoint for its side effects; the previous version bound the
    # result to an unused local.
    self.checkpoint()
    self.exit_code = self._exit_code()
    if self.exit_code != 0:
        raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
    # TODO(chris): should this happen every time?
    if self.applied_count > 0:
        self._end_acquisition()
        self._apply_rows()
        self.exit_code = self._exit_code()
        if self.exit_code != 0:
            raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
    self.finished = True
    return self.exit_code
def from_file(self, filename, table=None, delimiter='|', null='NULL',
panic=True, quotechar='"', parse_dates=False):
"""
Load from a file into the target table, handling each step of the
load process.
Can load from text files, and properly formatted giraffez archive
files. In both cases, if Gzip compression is detected the file will be
decompressed while reading and handled appropriately. The encoding is
determined automatically by the contents of the file.
It is not necessary to set the columns in use prior to loading from a file.
In the case of a text file, the header is used to determine column names
and their order. Valid delimiters include '|', ',', and '\\t' (tab). When
loading an archive file, the column information is decoded alongside the data.
:param str filename: The location of the file to be loaded
:param str table: The name of the target table, if it was not specified
to the constructor for the instance
:param str null: The string that indicates a null value in the rows being
inserted from a file. Defaults to 'NULL'
:param str delimiter: When loading a file, indicates that fields are
separated by this delimiter. Defaults to :code:`None`, which causes the
delimiter to be determined from the header of the file. In most
cases, this behavior is sufficient
:param str quotechar: The character used to quote fields containing special characters,
like the delimiter.
:param bool panic: If :code:`True`, when an error is encountered it will be
raised. Otherwise, the error will be logged and :code:`self.error_count`
is incremented.
:return: The output of the call to
:meth:`~giraffez.load.TeradataBulkLoad.finish`
:raises `giraffez.errors.GiraffeError`: if table was not set and :code:`table`
is :code:`None`, or if a Teradata error occurred while retrieving table info.
:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
are format errors in the row values.
"""
if not self.table:
if not table:
raise GiraffeError("Table must be set or specified to load a file.")
self.table = table
if not isinstance(null, basestring):
raise GiraffeError("Expected 'null' to be str, received {}".format(type(null)))
with Reader(filename, delimiter=delimiter, quotechar=quotechar) as f:
if not isinstance(f.delimiter, basestring):
raise GiraffeError("Expected 'delimiter' to be str, received {}".format(type(delimiter)))
self.columns = f.header
if isinstance(f, ArchiveFileReader):
self.mload.set_encoding(ROW_ENCODING_RAW)
self.preprocessor = lambda s: s
if parse_dates:
self.preprocessor = DateHandler(self.columns)
self._initiate()
self.mload.set_null(null)
self.mload.set_delimiter(delimiter)
i = 0
for i, line in enumerate(f, 1):
self.put(line, panic=panic)
if i % self.checkpoint_interval == 1:
log.info("\rBulkLoad", "Processed {} rows".format(i), console=True)
checkpoint_status = self.checkpoint()
self.exit_code = self._exit_code()
if self.exit_code != | |
# -*- coding: utf-8 -*-
"""
The :mod:`PyDynamic.uncertainty.propagate_DFT` module implements methods for the propagation of uncertainties in the
application of the DFT, inverse DFT, deconvolution and multiplication in the frequency domain, transformation from
amplitude and phase to real and imaginary parts and vice versa.
The corresponding scientific publication is
S. Eichstädt and V. Wilkens
GUM2DFT — a software tool for uncertainty evaluation of transient signals in the frequency domain.
*Measurement Science and Technology*, 27(5), 055001, 2016.
[DOI: `10.1088/0957-0233/27/5/055001 <http://dx.doi.org/10.1088/0957-0233/27/5/055001>`_]
"""
import numpy as np
from scipy import sparse
import warnings
__all__ = ['GUM_DFT','GUM_iDFT', 'GUM_DFTfreq', 'DFT_deconv', 'DFT_multiply', 'AmpPhase2DFT', 'DFT2AmpPhase', 'AmpPhase2Time', 'Time2AmpPhase']
def apply_window(x, Ux, window):
    """Apply a time domain window to the signal x of equal length and propagate uncertainties

    Args:
        x: vector of time domain signal values
        Ux: covariance matrix associated with x or noise variance as float
        window: vector of time domain window (same length as x)

    Returns:
        xw,Uxw
    """
    assert (len(x) == len(window))
    scalar_unc = isinstance(Ux, float)
    if not scalar_unc:
        assert (Ux.shape[0] == Ux.shape[1] and Ux.shape[0] == len(x))
    windowed = x.copy() * window
    if scalar_unc:
        # i.i.d. noise: variance scales with the squared window
        windowed_unc = Ux * window ** 2
    else:
        # full covariance: diag(window) * Ux * diag(window)
        windowed_unc = prod(window, prod(Ux, window))
    return windowed, windowed_unc
def prod(A, B):
    """Calculate the matrix-vector product, or vector-matrix product
    that corresponds to diag(A)*B or A*diag(B), respectively; depending
    on which of A,B is the matrix and which the vector.

    The previous implementation filled the result row by row (resp.
    column by column) in a Python loop; broadcasting produces the same
    values in one vectorized operation. The result dtype of the original
    (that of the matrix argument, via ``np.zeros_like``) is preserved.

    Raises
    ------
    ValueError
        if neither argument is a vector or neither is a 2-D matrix
    """
    if len(A.shape) == 1 and len(B.shape) == 2:
        # A is the vector: diag(A)*B scales the rows of B
        return (A[:, np.newaxis] * B).astype(B.dtype, copy=False)
    elif len(A.shape) == 2 and len(B.shape) == 1:
        # B is the vector: A*diag(B) scales the columns of A
        return (A * B[np.newaxis, :]).astype(A.dtype, copy=False)
    else:
        raise ValueError("Wrong dimension of inputs")
def matprod(M, V, W, return_as_matrix=True):
    """Calculate the matrix-matrix-matrix product (V1,V2)M(W1,W2) for V=(V1,V2)
    and W=(W1,W2). M can be sparse, one-dimensional or a full (quadratic) matrix.

    V and W are stacked vectors [upper; lower] of equal (even) length; the
    result is the corresponding 2x2-block quadratic form.
    """
    if len(M.shape) == 2:
        # M must be square and conformable with the stacked vectors
        assert (M.shape[0] == M.shape[1])
    assert (M.shape[0] == V.shape[0])
    assert (V.shape == W.shape)
    N = V.shape[0] // 2
    # split the stacked vectors into their upper and lower halves
    v1 = V[:N]; v2 = V[N:]
    w1 = W[:N]; w2 = W[N:]
    if isinstance(M, sparse.dia_matrix):
        # Sparse diagonal storage: the main diagonal holds the two diagonal
        # blocks A (upper) and D (lower); the band at offsets[1] holds the
        # off-diagonal block B -- presumably M = [[A,B],[B,D]] with all
        # blocks diagonal (TODO confirm against callers).
        nrows = M.shape[0]
        offset = M.offsets
        diags = M.data
        A = diags[0][:N]
        B = diags[1][offset[1]:nrows + offset[1]]
        D = diags[0][N:]
        return np.diag(v1 * A * w1 + v2 * B * w1 + v1 * B * w2 + v2 * D * w2)
    elif len(M.shape) == 1:
        # M given as the joint main diagonal [A; D]; no off-diagonal block
        A = M[:N]
        D = M[N:]
        if return_as_matrix:
            return np.diag(v1 * A * w1 + v2 * D * w2)
        else:
            return np.r_[v1 * A * w1 + v2 * D * w2]
    else:
        # dense matrix: use the 2x2 block partition of M
        A = M[:N, :N]
        B = M[:N, N:]
        D = M[N:, N:]
        return prod(v1, prod(A, w1)) + prod(v2, prod(B.T, w1)) + prod(v1, prod(B, w2)) + prod(v2, prod(D, w2))
def GUM_DFT(x, Ux, N=None, window=None, CxCos=None, CxSin=None, returnC=False, mask=None):
    """Calculation of the DFT of the time domain signal x and propagation of the squared uncertainty Ux
    associated with the time domain sequence x to the real and imaginary parts of the DFT of x.

    Parameters
    ----------
    x : numpy.ndarray
        vector of time domain signal values
    Ux : numpy.ndarray
        covariance matrix associated with x, shape (N,N) or noise variance as float
    N : int, optional
        length of time domain signal for DFT; N>=len(x)
    window : numpy.ndarray, optional
        vector of the time domain window values
    CxCos : numpy.ndarray, optional
        cosine part of sensitivity matrix
    CxSin : numpy.ndarray, optional
        sine part of sensitivity matrix
    returnC : bool, optional
        if true, return sensitivity matrix blocks for later use
    mask: ndarray of dtype bool
        calculate DFT values and uncertainties only at those frequencies where mask is `True`

    Returns
    -------
    F : numpy.ndarray
        vector of the real parts followed by the imaginary parts of the DFT values
    UF : numpy.ndarray
        covariance matrix associated with real and imaginary part of F

    References
    ----------
    * Eichstädt and Wilkens [Eichst2016]_
    """
    L = 0  # number of zero-padding samples (0 unless N > len(x))
    if isinstance(window, np.ndarray):
        x, Ux = apply_window(x, Ux, window)
    if isinstance(N, int):
        # zero-pad the signal up to the requested DFT length
        L = N - len(x)
        assert (L >= 0)
        x = np.r_[x.copy(), np.zeros(L,)]
    N = len(x)
    # M is chosen such that M//2 equals the number of rfft output bins
    if np.mod(N, 2) == 0:  # N is even
        M = N + 2
    else:
        M = N + 1
    if isinstance(mask, np.ndarray):
        # masked evaluation: keep only the selected frequency bins
        F = np.fft.rfft(x)[mask]
        F = np.r_[np.real(F), np.imag(F)]
        warnings.warn("In a future release, because of issues with the current version, \nthe handling of masked DFT arrays will be changed to use numpy masked arrays.", DeprecationWarning)
    else:
        F = np.fft.rfft(x)
        F = np.r_[np.real(F), np.imag(F)]
        mask = np.ones(len(F) // 2, dtype=bool)
    Nm = 2 * np.sum(mask)  # length of the (possibly masked) output [Re; Im]
    # sensitivities of the DFT w.r.t. the time domain samples; zero-padded
    # samples carry no uncertainty, hence only the first N-L samples appear
    beta = 2 * np.pi * np.arange(N - L) / N
    Cxkc = lambda k: np.cos(k * beta)[np.newaxis, :]
    Cxks = lambda k: -np.sin(k * beta)[np.newaxis, :]
    if isinstance(Ux, float):
        # i.i.d. noise: the output covariance stays diagonal, so only the
        # variances are computed and stored in a flat vector
        UF = np.zeros(Nm)
        km = 0
        for k in range(M // 2):  # Block cos/cos
            if mask[k]:
                UF[km] = np.sum(Ux * Cxkc(k) ** 2)
                km += 1
        km = 0
        for k in range(M // 2):  # Block sin/sin
            if mask[k]:
                UF[Nm // 2 + km] = np.sum(Ux * Cxks(k) ** 2)
                km += 1
    else:  # general method
        if len(Ux.shape) == 1:
            Ux = np.diag(Ux)
        if not isinstance(CxCos, np.ndarray):
            # build the sensitivity matrices row by row (unless supplied)
            CxCos = np.zeros((Nm // 2, N - L))
            CxSin = np.zeros((Nm // 2, N - L))
            km = 0
            for k in range(M // 2):
                if mask[k]:
                    CxCos[km, :] = Cxkc(k)
                    CxSin[km, :] = Cxks(k)
                    km += 1
        # blocks of the output covariance: cos/cos, cos/sin and sin/sin
        UFCC = np.dot(CxCos, np.dot(Ux, CxCos.T))
        UFCS = np.dot(CxCos, np.dot(Ux, CxSin.T))
        UFSS = np.dot(CxSin, np.dot(Ux, CxSin.T))
        try:
            UF = np.vstack((np.hstack((UFCC, UFCS)), np.hstack((UFCS.T, UFSS))))
        except MemoryError:
            print("Could not put covariance matrix together due to memory constraints.")
            print("Returning the three blocks (A,B,C) such that U = [[A,B],[B.T,C]] instead.")
            UF = (UFCC, UFCS, UFSS)
    if returnC:
        return F, UF, {"CxCos": CxCos, "CxSin": CxSin}
    else:
        return F, UF
def GUM_iDFT(F, UF, Nx=None, Cc=None, Cs=None, returnC=False):
    """GUM propagation of the squared uncertainty UF associated with the DFT values F through the
    inverse DFT

    The matrix UF is assumed to be for real and imaginary part with blocks:
    UF = [[u(R,R), u(R,I)],[u(I,R),u(I,I)]]
    and real and imaginary part obtained from calling rfft (DFT for real-valued signal)

    Parameters
    ----------
    F : np.ndarray
        vector of real and imaginary parts of a DFT result
    UF: np.ndarray
        covariance matrix associated with real and imaginary parts of F
    Nx: int, optional
        number of samples of iDFT result
    Cc: np.ndarray, optional
        cosine part of sensitivities
    Cs: np.ndarray, optional
        sine part of sensitivities
    returnC: if true, return sensitivity matrix blocks

    Returns
    -------
    x: np.ndarray
        vector of time domain signal values
    Ux: np.ndarray
        covariance matrix associated with x

    References
    ----------
    * Eichstädt and Wilkens [Eichst2016]_
    """
    # F holds N//2+1 real parts followed by N//2+1 imaginary parts,
    # hence the underlying time-domain length is N = len - 2
    N = UF.shape[0] - 2
    if Nx is None:
        Nx = N
    else:
        assert (Nx <= UF.shape[0] - 2)
    beta = 2 * np.pi * np.arange(Nx) / N
    # calculate inverse DFT
    x = np.fft.irfft(F[:N // 2 + 1] + 1j * F[N // 2 + 1:])[:Nx]
    if not isinstance(Cc, np.ndarray):  # calculate sensitivities
        # DC and Nyquist bins enter once; all other bins twice (conjugate
        # symmetry of the rfft spectrum)
        Cc = np.zeros((Nx, N // 2 + 1))
        Cc[:, 0] = 1.0; Cc[:, -1] = np.cos(np.pi * np.arange(Nx))
        for k in range(1, N // 2):
            Cc[:, k] = 2 * np.cos(k * beta)
    if not isinstance(Cs, np.ndarray):
        Cs = np.zeros((Nx, N // 2 + 1))
        Cs[:, 0] = 0.0; Cs[:, -1] = -np.sin(np.pi * np.arange(Nx))
        for k in range(1, N // 2):
            Cs[:, k] = -2 * np.sin(k * beta)
    # calculate blocks of uncertainty matrix
    if len(UF.shape) == 2:
        RR = UF[:N // 2 + 1, :N // 2 + 1]
        RI = UF[:N // 2 + 1, N // 2 + 1:]
        II = UF[N // 2 + 1:, N // 2 + 1:]
        # propagate uncertainties
        Ux = np.dot(Cc, np.dot(RR, Cc.T))
        Ux = Ux + 2 * np.dot(Cc, np.dot(RI, Cs.T))
        Ux = Ux + np.dot(Cs, np.dot(II, Cs.T))
    else:
        # diagonal covariance given as flat vector [RR; II]
        RR = UF[:N // 2 + 1]
        II = UF[N // 2 + 1:]
        Ux = np.dot(Cc, prod(RR, Cc.T)) + np.dot(Cs, prod(II, Cs.T))
    # 1/N**2 accounts for the normalization of the inverse DFT
    if returnC:
        return x, Ux / N ** 2, {"Cc": Cc, "Cs": Cs}
    else:
        return x, Ux / N ** 2
def GUM_DFTfreq(N, dt=1):
    """Return the Discrete Fourier Transform sample frequencies.

    Thin wrapper around :func:`numpy.fft.rfftfreq`.

    Parameters
    ----------
    N: int
        window length
    dt: float
        sample spacing (inverse of sampling rate)

    Returns
    -------
    f: ndarray
        Array of length ``N//2 + 1`` containing the sample frequencies

    See also
    --------
    numpy.fft.rfftfreq
    """
    return np.fft.rfftfreq(N, d=dt)
def DFT2AmpPhase(F, UF, keep_sparse=False, tol=1.0, return_type="separate"):
    """Transformation from real and imaginary parts to magnitude and phase

    Calculate the matrix
    U_AP = [[U1,U2],[U2^T,U3]]
    associated with magnitude and phase of the vector F=[real,imag]
    with associated covariance matrix U_F=[[URR,URI],[URI^T,UII]]

    Parameters
    ----------
    F: np.ndarray
        vector of real and imaginary parts of a DFT result
    UF: np.ndarray
        covariance matrix associated with F
    keep_sparse: bool, optional
        if true then UAP will be sparse if UF is one-dimensional
    tol: float, optional
        lower bound for A/uF below which a warning will be issued concerning unreliable results
    return_type: str, optional
        If "separate" then magnitude and phase are returned as separate arrays. Otherwise the array [A, P] is returned

    Returns
    -------
    If `return_type` is `separate`:
        A: np.ndarray
            vector of magnitude values
        P: np.ndarray
            vector of phase values in radians, in the range [-pi, pi]
        UAP: np.ndarray
            covariance matrix associated with (A,P)
    Otherwise:
        AP: np.ndarray
            vector of magnitude and phase values
        UAP: np.ndarray
            covariance matrix associated with AP
    """
    # split F into its real and imaginary halves
    N = len(F) - 2
    R = F[:N // 2 + 1]; I = F[N // 2 + 1:]
    A = np.sqrt(R ** 2 + I ** 2)  # magnitude
    P = np.arctan2(I, R)          # phase
    # rough standard uncertainty used only for the reliability warning
    if len(UF.shape) == 1:
        uF = 0.5 * (np.sqrt(UF[:N // 2 + 1]) + np.sqrt(UF[N // 2 + 1:]))
    else:
        uF = 0.5 * (np.sqrt(np.diag(UF[:N // 2 + 1, :N // 2 + 1])) + np.sqrt(np.diag(UF[N // 2 + 1:, N // 2 + 1:])))
    if np.any(A / uF < tol):
        print('DFT2AmpPhase Warning\n Some amplitude values are below the defined threshold.')
        print('The GUM formulas may become unreliable and a Monte Carlo approach is recommended instead.')
        print('The actual minimum value of A/uF is %.2e and the threshold is %.2e'%((A/uF).min(), tol))
    # sensitivities of magnitude (a*) and phase (p*) w.r.t. real/imag parts
    aR = R / A
    aI = I / A
    pR = -I / A ** 2
    pI = R / A ** 2
    if len(UF.shape) == 1:  # diagonal covariance: no real/imag cross terms
        URR = UF[:N // 2 + 1]
        UII = UF[N // 2 + 1:]
        U11 = URR * aR ** 2 + UII * aI ** 2
        U12 = aR * URR * pR + aI * UII * pI
        U22 = URR * pR ** 2 + UII * pI ** 2
        UAP = sparse.diags([np.r_[U11, U22], U12, U12], [0, N // 2 + 1, -(N // 2 + 1)])
        if not keep_sparse:
            UAP = UAP.toarray()
    else:  # full covariance: UAP = J UF J^T with J built from aR,aI,pR,pI
        URR = UF[:N // 2 + 1, :N // 2 + 1]
        URI = UF[:N // 2 + 1, N // 2 + 1:]
        UII = UF[N // 2 + 1:, N // 2 + 1:]
        U11 = prod(aR, prod(URR, aR)) + prod(aR, prod(URI, aI)) + prod(aI, prod(URI.T, aR)) + prod(aI, prod(UII, aI))
        # BUGFIX: the second term of U12 is aR*URI*pI (it erroneously used
        # aI), matching U12 = aR URR pR + aR URI pI + aI URI^T pR + aI UII pI
        # from the Jacobian product, consistent with U11 and U22 below.
        U12 = prod(aR, prod(URR, pR)) + prod(aR, prod(URI, pI)) + prod(aI, prod(URI.T, pR)) + prod(aI, prod(UII, pI))
        U22 = prod(pR, prod(URR, pR)) + prod(pR, prod(URI, pI)) + prod(pI, prod(URI.T, pR)) + prod(pI, prod(UII, pI))
        UAP = np.vstack((np.hstack((U11, U12)), np.hstack((U12.T, U22))))
    if return_type == "separate":
        return A, P, UAP
    else:
        return np.r_[A, P], UAP
def AmpPhase2DFT(A, P, UAP, keep_sparse=False):
    """Transformation from magnitude and phase to real and imaginary parts

    Calculate the vector F=[real,imag] and propagate the covariance matrix UAP associated with [A, P]

    Parameters
    ----------
    A: np.ndarray
        vector of magnitude values
    P: np.ndarray
        vector of phase values (in radians)
    UAP: np.ndarray
        covariance matrix associated with (A,P)
        or vector of squared standard uncertainties [u^2(A),u^2(P)]
    keep_sparse: bool, optional
        whether to transform sparse matrix to numpy array or not

    Returns
    -------
    F: np.ndarray
        vector of real and imaginary parts of DFT result
    UF: np.ndarray
        covariance matrix associated with F
    """
    assert (len(A.shape) == 1)
    assert (A.shape == P.shape)
    assert (UAP.shape == (2 * len(A), 2 * len(A)) or UAP.shape == (2 * len(A),))
    # calculation of F
    F = np.r_[A * np.cos(P), A * np.sin(P)]
    # sensitivities of real (CR*) and imaginary (CI*) parts w.r.t. A and P
    CRA = np.cos(P)
    CRP = -A * np.sin(P)
    CIA = np.sin(P)
    CIP = A * np.cos(P)
    # assignment of uncertainty blocks in UAP
    N = len(A)
    if UAP.shape == (2 * N,):  # zero correlation; just standard deviations
        Ua = UAP[:N]
        Up = UAP[N:]
        U11 = CRA * Ua * CRA + CRP * Up * CRP
        U12 = CRA * Ua * CIA + CRP * Up * CIP
        U22 = CIA * Ua * CIA + CIP * Up * CIP
        UF = sparse.diags([np.r_[U11, U22], U12, U12], [0, N, -N])
        if not keep_sparse:
            UF = UF.toarray()
    else:
        if isinstance(UAP, sparse.dia_matrix):
            # main diagonal holds [Uaa; Upp]; the band at offsets[1] holds Uap
            nrows = 2 * N
            offset = UAP.offsets
            diags = UAP.data
            Uaa = diags[0][:N]
            Uap = diags[1][offset[1]:nrows + offset[1]]
            Upp = diags[0][N:]
            U11 = Uaa * CRA ** 2 + CRP * Uap * CRA + CRA * Uap * CRP + Upp * CRP ** 2
            # BUGFIX: the third term of U12 is CRA*Uap*CIP (it erroneously
            # used CIA), matching the Jacobian product
            # U12 = CRA Uaa CIA + CRP Uap CIA + CRA Uap CIP + CRP Upp CIP.
            U12 = CRA * Uaa * CIA + CRP * Uap * CIA + CRA * Uap * CIP + CRP * Upp * CIP
            U22 = Uaa * CIA ** 2 + CIP * Uap * CIA + CIA * Uap * CIP + Upp * CIP ** 2
            UF = sparse.diags([np.r_[U11, U22], U12, U12], [0, N, -N])
            if not keep_sparse:
                UF = UF.toarray()
        else:
            # dense covariance: UF = J UAP J^T evaluated blockwise
            Uaa = UAP[:N, :N]
            Uap = UAP[:N, N:]
            Upp = UAP[N:, N:]
            U11 = prod(CRA, prod(Uaa, CRA)) + prod(CRP, prod(Uap.T, CRA)) + prod(CRA, prod(Uap, CRP)) + prod(CRP, prod(Upp, CRP))
            # BUGFIX: third term uses CIP (was CIA), mirroring the sparse case
            U12 = prod(CRA, prod(Uaa, CIA)) + prod(CRP, prod(Uap.T, CIA)) + prod(CRA, prod(Uap, CIP)) + prod(CRP, prod(Upp, CIP))
            U22 = prod(CIA, prod(Uaa, CIA)) + prod(CIP, prod(Uap.T, CIA)) + prod(CIA, prod(Uap, CIP)) + prod(CIP, prod(Upp, CIP))
            UF = np.vstack((np.hstack((U11, U12)), np.hstack((U12.T, U22))))
    return F, UF
def Time2AmpPhase(x, Ux):
    """Transformation from time domain to amplitude and phase

    Convenience wrapper: applies GUM_DFT to obtain the DFT and its
    covariance, then converts the result with DFT2AmpPhase.

    Parameters
    ----------
    x: np.ndarray
        time domain signal
    Ux: np.ndarray
        squared uncertainty associated with x

    Returns
    -------
    A: np.ndarray
        amplitude values
    P: np.ndarray
        phase values
    UAP: np.ndarray
        covariance matrix associated with [A,P]
    """
    spectrum, spectrum_cov = GUM_DFT(x, Ux)
    return DFT2AmpPhase(spectrum, spectrum_cov)
def AmpPhase2Time(A,P,UAP):
"""Transformation from amplitude | |
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
@classmethod
def _extract_urls(cls, webpage):
    """Return every embed URL for this extractor found in *webpage*."""
    pattern = (r'<iframe[^>]+src=["\']((?:https?://)?%s/%s/[a-zA-Z0-9-_]+)'
               % (cls._DOMAINS, cls._EMBED_WORD))
    return re.findall(pattern, webpage)
def _extract_decrypted_page(self, page_url, webpage, video_id, headers):
phantom = PhantomJSwrapper(self, required_version='2.0')
webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
return | |
np.array_equal(state, 0):
raise Exception("Game has already ended!")
if explore:
move = env.get_random_action()
move_from = move.from_square
move_to = move.to_square
else:
action_space = env.project_legal_moves() # The environment determines which moves are legal
action_values = self.get_action_values(np.expand_dims(state, axis=0))
action_values = np.reshape(np.squeeze(action_values), (64, 64))
action_values = np.multiply(action_values, action_space)
# get position with maximal index from 64 x 64 matrix.
# Store row index in move_from and column index in move_to.
move_from = np.argmax(action_values, axis=None) // 64
move_to = np.argmax(action_values, axis=None) % 64
return env.validate_move(compose_move(move_from, move_to))
def update(self, prev_state, state, move, reward, state_other, move_other, reward_other, minibatch_size=256):
    """
    Update the agent (learning network) using experience replay. Sets the
    sampling probabilities from the TD error of each replayed transition.

    Args:
        prev_state:
            previous state board
        state:
            state of board after move
        move:
            performed move (move.from_square, move.to_square)
        reward:
            reward of the move
        state_other:
            state of board after others move
        move_other:
            performed move other agent(move.from_square, move.to_square)
        reward_other:
            reward of the other agent move
        minibatch_size: int
            number of transitions sampled for the replay update
    """
    if not self.learn:
        return
    combined_reward = reward + reward_other
    self.reward_trace.append(combined_reward)
    # Replay-buffer maintenance: evict the oldest experience when full.
    if len(self.memory) > self.memsize:
        self.memory.pop(0)
        self.sampling_probs.pop(0)
    self.memory.append([prev_state, (move.from_square, move.to_square), combined_reward, state_other])
    self.sampling_probs.append(1)
    # Prioritized experience replay: sample, learn, refresh priorities
    # with the absolute TD errors.
    minibatch, indices = self.sample_memory(minibatch_size)
    td_errors = self.update_model(minibatch)
    for pos, mem_idx in enumerate(indices):
        self.sampling_probs[mem_idx] = np.abs(td_errors[pos])
    # Log the mean absolute TD error to TensorBoard, if configured.
    if self.writer:
        with self.writer.as_default():
            tf.summary.scalar('mean time difference error',
                              data=np.mean(np.abs(td_errors)),
                              step=self.writer_step)
            self.writer.flush()
        self.writer_step += 1
def update_after_game(self):
    """
    Called after each game: computes the discounted cumulative rewards of
    the finished episode and logs episode statistics to TensorBoard.
    """
    if not self.learn:
        return
    n_steps = len(self.reward_trace)
    gammas = np.array([self.gamma ** i for i in range(0, n_steps)])
    cumulative_rewards = [np.sum(gammas[:(n_steps - i)] * self.reward_trace[i:])
                          for i in range(0, n_steps)]
    if self.writer:
        with self.writer.as_default():
            for i, cum_reward in enumerate(cumulative_rewards):
                tf.summary.scalar('cumulative reward', cum_reward,
                                  step=self.writer_step_start_episode + i)
            tf.summary.scalar('mean reward episode', np.mean(self.reward_trace), step=self.game_count)
            tf.summary.scalar('length episode', len(self.reward_trace), step=self.game_count)
            self.writer.flush()
        self.writer_step_start_episode += len(cumulative_rewards)
#################################
# Internal functions #
#################################
def init_network(self):
    """
    Build the Q-network selected by ``self.network``.

    Supported values are 'linear' and 'conv'; anything else raises
    NotImplementedError.
    """
    builders = {
        'linear': init_linear_network,
        'conv': init_conv_network,
    }
    try:
        builder = builders[self.network]
    except KeyError:
        raise NotImplementedError()
    self.model = builder(self.lr)
def fix_model(self):
    """
    Refresh the fixed (target) network used for bootstrapping.

    Clones the live Q-network architecture into ``self.feeding_model`` and
    copies its current weights, giving a frozen snapshot against which TD
    targets are computed.

    Returns:

    """
    # NOTE(review): the optimizer only exists to satisfy compile(); the
    # feeding model is never fitted directly, so its choice is inert.
    optimizer = SGD(lr=self.lr, momentum=0.0, decay=0.0, nesterov=False)
    self.feeding_model = clone_model(self.model)
    self.feeding_model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])
    # clone_model copies the architecture only - weights must be copied too.
    self.feeding_model.set_weights(self.model.get_weights())
# determine default epsilon function
@staticmethod
def default_epsilon_func(k, c=0.5, min_epsilon=0.05):
    """
    Default exploration schedule: epsilon decays as c / k, floored at
    ``min_epsilon``. The constants were hard-coded; they are now defaulted
    parameters so callers can tune the schedule without subclassing.

    Args:
        k: int
            number of games played so far; k == 0 forces full exploration
        c: float
            numerator of the 1/k decay (default 0.5)
        min_epsilon: float
            lower bound on epsilon (default 0.05)

    Returns:
        float epsilon in (0, 1]
    """
    if k == 0:
        return 1
    return max(c / k, min_epsilon)
def get_action_values(self, state):
    """
    Evaluate the fixed (feeding) network on a board state.

    Args:
        state: np.ndarray with shape (8,8,8)
            layer_board representation

    Returns:
        action values, perturbed by infinitesimal noise so that argmax
        tie-breaking is stochastic
    """
    values = self.feeding_model.predict(state)
    noise = np.random.randn() * 1e-9
    return values + noise
def sample_memory(self, minibatch_size):
    """
    Get a prioritized sample from memory for experience replay.

    Args:
        minibatch_size: int
            maximum size of the minibatch (capped by the memory length)

    Returns: tuple
        a mini-batch of experiences (list)
        indices of chosen experiences (np.ndarray)
    """
    # Normalize the priorities once. The original recomputed
    # np.sum(self.sampling_probs) for every element, making the
    # normalization O(n^2) in the memory size.
    total = np.sum(self.sampling_probs)
    sample_probs = [p / total for p in self.sampling_probs]
    n_values = min(minibatch_size, len(self.memory))
    # Sampling with replacement, weighted by (normalized) TD-error priority.
    indices = np.random.choice(range(len(self.memory)), n_values, replace=True, p=sample_probs)
    minibatch = [self.memory[i] for i in indices]
    return minibatch, indices
def update_model(self, minibatch):
    """
    Update the Q-network using samples from the minibatch. Does not take into account the other
    players move.

    Args:
        minibatch: list
            The minibatch contains the states, moves, rewards and new states.

    Returns:
        td_errors: np.array
            array of temporal difference errors
    """
    # Prepare separate lists
    states, moves, rewards, new_states = [], [], [], []
    td_errors = []
    episode_did_not_ends = []
    for sample in minibatch:
        states.append(sample[0])
        moves.append(sample[1])
        rewards.append(sample[2])
        new_states.append(sample[3])
        # Episode end detection: a terminal successor state is stored as the
        # scalar 0, so its bootstrap term is masked out below.
        if np.array_equal(sample[3], 0):
            episode_did_not_ends.append(0)
        else:
            episode_did_not_ends.append(1)
    # The Q target: r + gamma * max_a' Q_fixed(s', a'), with the bootstrap
    # term zeroed for terminal transitions. The *fixed* (feeding) model is
    # used here so targets do not chase the network being trained.
    q_target = np.array(rewards) + np.array(episode_did_not_ends) * self.gamma * np.max(
        self.feeding_model.predict(np.stack(new_states, axis=0)), axis=1)
    # The Q value for the remaining actions
    q_state = self.model.predict(np.stack(states, axis=0))  # batch x 64 x 64
    # Combine the Q target with the other Q values.
    q_state = np.reshape(q_state, (len(minibatch), 64, 64))
    for idx, move in enumerate(moves):
        # TD error is taken before overwriting the played (from, to) entry
        # with its target - only that entry contributes a gradient.
        td_errors.append(q_state[idx, move[0], move[1]] - q_target[idx])
        q_state[idx, move[0], move[1]] = q_target[idx]
    q_state = np.reshape(q_state, (len(minibatch), 4096))
    # Perform a step of minibatch Gradient Descent.
    self.model.fit(x=np.stack(states, axis=0), y=q_state, epochs=1, verbose=self.verbose)
    return td_errors
class ReinforceAgent(Agent):
"""
REINFORCE: REward Increment = Nonnegative Factor × Offset Reinforcement × Characteristic Eligibility
REINFORCE is the fundamental policy gradient algorithm.
"""
def __init__(self, memsize=1000, gamma=0.5, lr=0.3, verbose=0, log_dir=None):
    """
    Args:
        memsize: int
            total states to hold in memory for experience replay.
        gamma: float
            Temporal discount factor
        lr: float
            Learning rate
        verbose: int
            verbose output: 0 or 1.
        log_dir:
            directory for logging; when None, TensorBoard logging is off
    """
    super().__init__()
    # Hyperparameters
    self.memsize = memsize
    self.gamma = gamma
    self.lr = lr
    self.verbose = verbose
    # TensorBoard bookkeeping (writer stays None without a log_dir)
    self.writer = tf.summary.create_file_writer(log_dir) if log_dir else None
    self.writer_step = 0
    self.writer_step_start_episode = 0
    self.writer_episode = 0
    self.learn_game_count = 0  # only learning games are counted.
    # Episode buffers
    self.memory_mean_rewards = []
    self.episode_data = []
    self.set_learn(True)
    self.model = init_conv_pg(self.lr)
class SaveData():
    """Pickled container for agent state persisted by save()/load()."""
    # changing the attribute names will break save / load of older models!
    def __init__(self, memsize, gamma, lr, learn_game_count, optimizer_weights):
        (self.memsize,
         self.gamma,
         self.lr,
         self.learn_game_count,
         self.optimizer_weights) = (memsize, gamma, lr,
                                    learn_game_count, optimizer_weights)
class EpisodeData():
    """One step of an episode: state, chosen move, reward, legal-move mask."""
    def __init__(self, state, move, reward, action_space):
        for name, value in zip(("state", "move", "reward", "action_space"),
                               (state, move, reward, action_space)):
            setattr(self, name, value)
#################################
# External functions #
#################################
def load(self, load_loc):
    """
    Loads the agent state (network weights, optimizer state and
    hyperparameters). The original docstring wrongly said "Saves".

    Args
        load_loc: String
            file location to load from without extension

    Returns:
        True on success, False when any part of the restore fails.
    """
    try:
        self.model.load_weights(load_loc + ".h5")
        # Build the training function so the optimizer has weight slots
        # to restore into.
        self.model._make_train_function()
        # Context manager: the original leaked the open file handle.
        with open(load_loc + ".pkl", "rb") as pkl_file:
            save_data = pickle.load(pkl_file)
        self.model.optimizer.set_weights(save_data.optimizer_weights)
        self.memsize = save_data.memsize
        self.gamma = save_data.gamma
        self.lr = save_data.lr
        self.learn_game_count = save_data.learn_game_count
        return True
    except Exception:
        # Best-effort semantics preserved (any failure reports False), but
        # no longer a bare except that swallowed KeyboardInterrupt/SystemExit.
        return False
def save(self, save_loc):
    """
    Saves the agent state (network weights, optimizer state and
    hyperparameters).

    Args
        save_loc: String
            file location to save to without extension
    """
    self.model.save_weights(save_loc + ".h5")
    # Capture the optimizer slot variables so training can resume exactly.
    symbolic_weights = getattr(self.model.optimizer, 'weights')
    optimizer_weights = K.batch_get_value(symbolic_weights)
    save_data = self.SaveData(self.memsize, self.gamma, self.lr, self.learn_game_count, optimizer_weights)
    # Context manager guarantees the pickle file is flushed and closed;
    # the original passed an open() call directly and leaked the handle.
    with open(save_loc + ".pkl", "wb") as pkl_file:
        pickle.dump(save_data, pkl_file)
def set_learn(self, learn):
    """
    Set learning on / off

    Args:
        learn: bool
            True to record episode data and update the network,
            False to act without training.
    """
    self.learn = learn
def reset_for_game(self):
    """
    Needs to be called before each game.

    Clears the per-episode buffer; when learning is enabled, each reset
    is counted as a new learning game.
    """
    self.episode_data = []
    if self.learn:
        self.learn_game_count += 1
def determine_move(self, env, white_player):
    """
    Sample the next move from the policy network.

    Args:
        env: Board
            environment of board.
        white_player: boolean
            Is the current player white?

    Returns: move
    """
    state = env.state()
    if np.array_equal(state, 0):
        raise Exception("Game has already ended!")
    # The environment determines which moves are legal
    action_space = env.project_legal_moves()
    # Hack: stash the legal-move mask so update() can store it later.
    self.temp_action_space = action_space
    model_inputs = [np.expand_dims(state, axis=0),
                    np.zeros((1, 1)),
                    action_space.reshape(1, 4096)]
    action_probs = clip_probs(self.model.predict(model_inputs))
    # Sample a flat index from the 64 x 64 (from, to) distribution:
    # row index -> origin square, column index -> target square.
    flat_index = np.random.choice(range(4096), p=np.squeeze(action_probs))
    move_from, move_to = divmod(flat_index, 64)
    if self.learn and self.writer:
        with self.writer.as_default():
            tf.summary.scalar('probability max move', np.max(action_probs), step=self.writer_step)
        self.writer_step += 1
    return env.validate_move(compose_move(move_from, move_to))
def update(self, prev_state, state, move, reward, state_other, move_other, reward_other, minibatch_size=256):
    """
    Store one step of episode data for the end-of-game policy update.

    Args:
        prev_state:
            previous state board
        state:
            state of board after move
        move:
            performed move (move.from_square, move.to_square)
        reward:
            reward of the move
        state_other:
            state of board after others move
        move_other:
            performed move other agent(move.from_square, move.to_square)
        reward_other:
            reward of the other agent move
        minibatch_size:
            unused here; kept for interface compatibility with other agents
    """
    # Both half-move rewards are folded into a single step reward; the
    # legal-move mask was stashed by determine_move().
    self.episode_data.append(
        self.EpisodeData(prev_state, move, reward + reward_other,
                         self.temp_action_space))
def update_after_game(self):
    """
    Called after each game: runs the REINFORCE update and logs the
    episode's cumulative rewards to TensorBoard.
    """
    if not self.learn:
        return
    cumulative_rewards = self.update_model()
    if self.writer:
        with self.writer.as_default():
            for i, cum_reward in enumerate(cumulative_rewards):
                tf.summary.scalar('cumulative reward', cum_reward,
                                  step=self.writer_step_start_episode + i)
            tf.summary.scalar('mean reward episode', np.mean(cumulative_rewards), step=self.writer_episode)
            tf.summary.scalar('length episode', len(cumulative_rewards), step=self.writer_episode)
            self.writer.flush()
        self.writer_step_start_episode += len(cumulative_rewards)
        self.writer_episode += 1
#################################
# Internal functions #
#################################
def update_model(self):
"""
Update model with Monte Carlo Policy Gradient algorithm needs data of entire episode.
Returns:
cumulative_rewards in episode
"""
n_steps = len(self.episode_data)
discounts = np.array([ self.gamma ** i for i in range(0, len(self.episode_data))])
cumulative_rewards = [ np.sum(discounts[:(n_steps - i)] * self.episode_data[i].reward) for i in range(0, n_steps)]
states = [ self.episode_data[i].state for i in range(0, n_steps)]
| |
vec1 = coords[j] - coords[i]
vec2 = coords[k] - coords[i]
if all(np.linalg.norm(vec) < max_triple_dist for vec in (vec1, vec2)):
avg_vec = (vec1 + vec2) / 2.0
possible_axes.append(avg_vec/np.linalg.norm(avg_vec))
perp_vec = np.cross(vec1, vec2)
possible_axes.append(perp_vec/np.linalg.norm(perp_vec))
unique_possible_axes = strip_identical_and_inv_axes(possible_axes,
sim_axis_tol)
return unique_possible_axes
def is_same_under_n_fold(pcoords, axis, n, m=1, tol=0.25,
                         excluded_pcoords=None):
    """
    Does applying an n-fold rotation about an axis generate the same structure
    back again?

    :param pcoords: (np.ndarray) shape = (n_unique_atom_types, n_atoms, 3)
    :param axis: (np.ndarray) shape = (3,)
    :param n: (int) n-fold of this rotation
    :param m: (int) Apply this n-fold rotation m times
    :param tol: (float)
    :param excluded_pcoords: (list) previously generated rotated structures;
        mutated in place - the rotated coordinates are appended on success
    :return: (bool)
    """
    n_unique, n_atoms, _ = pcoords.shape
    rotated_coords = np.array(pcoords, copy=True)
    # Rotation by m * (2 pi / n) about the axis
    rot_mat = rotation_matrix(axis, theta=(2.0 * np.pi * m / n))
    excluded = [False for _ in range(n_unique)]
    for i in range(n_unique):
        # Rotate these coordinates
        rotated_coords[i] = rot_mat.dot(rotated_coords[i].T).T
        dist_mat = distance_matrix(pcoords[i], rotated_coords[i])
        # If all elements are identical then carry on with the next element
        if np.linalg.norm(dist_mat) < tol:
            continue
        # If the RMS between the closest pairwise distance for each atom is
        # above the threshold then these structures are not the same
        if np.linalg.norm(np.min(dist_mat, axis=1)) > tol:
            return False
        if excluded_pcoords is not None:
            # If these rotated coordinates are similar to those on the excluded
            # list then these should not be considered identical
            # NOTE(review): the generator variable below shadows the *pcoords*
            # parameter; the shadowing is confined to the generator's scope,
            # but renaming it would aid readability.
            if any(np.linalg.norm(rotated_coords[i] - pcoords[i])
                   < tol for pcoords in excluded_pcoords):
                excluded[i] = True
    # This permutation has already been found - return False even though
    # it's the same, because there is an excluded list
    if all(excluded):
        return False
    # Add to a list of structures that have already been generated by rotations
    if excluded_pcoords is not None:
        excluded_pcoords.append(rotated_coords)
    return True
def cn_and_axes(molecule, max_n, dist_tol):
    """
    Find all Cn rotation axes of a molecule for n = 2 .. max_n.

    :param molecule: (otherm.Molecule)
    :param max_n: (int) highest n-fold rotation searched
    :param dist_tol: (float) distance tolerance for structural identity
    :return: (dict) mapping n -> list of axes under which the structure
        is unchanged by an n-fold rotation
    """
    candidate_axes = get_possible_axes(coords=molecule.coords())
    pcoords = molecule.pcoords()
    # Minimum n-fold rotation is 2
    return {fold: [axis for axis in candidate_axes
                   if is_same_under_n_fold(pcoords, axis, n=fold, tol=dist_tol)]
            for fold in range(2, max_n + 1)}
def calc_symmetry_number(molecule, max_n_fold_rot_searched=6, dist_tol=0.25):
    """
    Calculate the symmetry number of a molecule.
    Based on Theor Chem Account (2007) 118:813-826

    :param molecule: (otherm.Molecule) molecule with coordinates set
    :param max_n_fold_rot_searched: (int) highest n-fold rotation considered
    :param dist_tol: (float) distance tolerance for structural identity
    :return: (int) rotational symmetry number sigma_r
    """
    # Ensure the origin is at the center of mass
    if np.max(molecule.com) > 0.1:
        molecule.shift_to_com()
    # Get the highest Cn-fold rotation axis
    cn_axes = cn_and_axes(molecule, max_n_fold_rot_searched, dist_tol)
    # If there are no C2 or greater axes then this molecule is C1 → σ=1
    if all(len(cn_axes[n]) == 0 for n in cn_axes.keys()):
        return 1
    pcoords = molecule.pcoords()
    sigma_r = 1  # Already has E symmetry
    # Structures already generated by earlier rotations; passed through so
    # the same permutation is not double counted.
    added_pcoords = []
    # For every possible axis apply C2, C3...C_n_max rotations
    for n, axes in cn_axes.items():
        for axis in axes:
            # Apply this rotation m times e.g. once for a C2 etc.
            for m in range(1, n):
                # If the structure is the same but and has *not* been generated
                # by another rotation increment the symmetry number by 1
                if is_same_under_n_fold(pcoords, axis, n=n, m=m,
                                        tol=dist_tol,
                                        excluded_pcoords=added_pcoords):
                    sigma_r += 1
    if molecule.is_linear():
        # There are perpendicular C2s the point group is D∞h
        if sigma_r > 2:
            return 2
        # If not then C∞v and the symmetry number is 1
        else:
            return 1
    return sigma_r
def calc_moments_of_inertia(xyz_list):
    """
    From a list of xyzs compute the matrix of moments of inertia

    :param xyz_list: List of xyzs in the format [[C, 0.0000, 0.0000, 0.0000], ....]
    :return: (np.ndarray) 3x3 inertia matrix in kg m^2
    """
    inertia = np.zeros([3, 3])
    for atom_label, x_ang, y_ang, z_ang in xyz_list:
        # Convert coordinates (angstrom -> m) and mass (amu -> kg)
        x = Constants.ang_to_m * x_ang
        y = Constants.ang_to_m * y_ang
        z = Constants.ang_to_m * z_ang
        mass = Constants.atomic_masses[atom_label] * Constants.amu_to_kg
        # Diagonal: I_aa = sum m (b^2 + c^2); off-diagonal: I_ab = -sum m a b
        inertia += mass * np.array([[y**2 + z**2, -x * y,      -x * z],
                                    [-y * x,      x**2 + z**2, -y * z],
                                    [-z * x,      -z * y,      x**2 + y**2]])
    return inertia
def calc_q_trans_igm(molecule, ss, temp):
    """
    Translational partition function from the particle-in-a-box model,
    coupled with an effective standard-state volume.

    :param molecule: (otherm.Molecule)
    :param ss: (str) Standard state to use {1M, 1atm}
    :param temp: (float) Temperature in K
    :return: (float) Translational partition function q_trans
    """
    standard_state = ss.lower()
    if standard_state == '1atm':
        # Volume per molecule of an ideal gas at 1 atm
        effective_volume = Constants.k_b * temp / Constants.atm_to_pa
    elif standard_state == '1m':
        # Volume per molecule at a 1 mol dm^-3 concentration
        effective_volume = 1.0 / (Constants.n_a * (1.0 / Constants.dm_to_m)**3)
    else:
        raise NotImplementedError
    thermal_term = (2.0 * np.pi * molecule.mass * Constants.k_b * temp /
                    Constants.h**2)**1.5
    return thermal_term * effective_volume
def calc_q_rot_igm(molecule, temp):
    """
    Calculate the rotational partition function using the IGM method. Uses the
    rotational symmetry number stored on the molecule (molecule.sigma_r).

    :param molecule: (otherm.Molecule)
    :param temp: (float) Temperature in K
    :return: (float) Rotational partition function q_rot
    """
    # An atom has no rotational degrees of freedom. Return *before* any
    # inertia arithmetic: a single atom at the COM has a zero inertia
    # matrix, and the original divided by it first, raising divide-by-zero
    # warnings and producing infs that were then discarded.
    if molecule.n_atoms == 1:
        return 1
    i_mat = calc_moments_of_inertia(molecule.xyzs)
    # Rotational "temperature" of each axis: theta = h^2 / (8 pi^2 k_B I).
    # Only the diagonal elements are needed; dividing the full matrix (as
    # before) also divided by off-diagonal elements that can be zero.
    omega_prod = 1.0
    for k in range(3):
        omega_prod *= Constants.h**2 / (8.0 * np.pi**2 * Constants.k_b * i_mat[k, k])
    return temp**1.5 / molecule.sigma_r * np.sqrt(np.pi / omega_prod)
def calc_q_vib_igm(molecule, temp):
    """
    Vibrational partition function within the IGM harmonic-oscillator model.

    Accumulates multiplicatively onto ``molecule.q_vib`` (assumed already
    initialised, presumably to 1 - TODO confirm against the Molecule
    constructor) and returns it.

    :param molecule: (otherm.Molecule)
    :param temp: (float) Temperature in K
    :return: (float) Vibrational partition function q_vib
    """
    if molecule.n_atoms == 1:
        # A single atom has no vibrational modes
        molecule.q_vib = 1
        return molecule.q_vib
    for freq in molecule.real_vib_freqs():
        # Vibrational temperature of this mode: x = h*nu / k_B
        x = freq * Constants.c_in_cm * Constants.h / Constants.k_b
        mode_q = np.exp(-x / (2.0 * temp)) / (1.0 - np.exp(-x / temp))
        molecule.q_vib *= mode_q
    return molecule.q_vib
def calc_s_trans_pib(molecule, ss, temp):
    """Translational entropy from a particle-in-a-box model.

    :param molecule: (otherm.Molecule)
    :param ss: (str) Standard state used for the effective box size in the
        q_trans calculation
    :param temp: (float) Temperature in K
    :return: (float) S_trans in J K-1 mol-1
    """
    q_trans = calc_q_trans_igm(molecule, ss=ss, temp=temp)
    # S_trans = R (ln q + 1 + 3/2)
    entropy_terms = np.log(q_trans) + 1.0 + 1.5
    return Constants.r * entropy_terms
def calc_s_rot_rr(molecule, temp):
    """
    Calculate the rigid rotor (RR) entropy

    :param molecule: (otherm.Molecule)
    :param temp: (float) Temperature in K
    :return: (float) S_rot in J K-1 mol-1
    """
    # A single atom carries no rotational entropy
    if molecule.n_atoms == 1:
        return 0
    q_rot = calc_q_rot_igm(molecule, temp=temp)
    # Linear molecules: S = R(ln q + 1); non-linear: S = R(ln q + 3/2)
    additive_term = 1.0 if molecule.is_linear() else 1.5
    return Constants.r * (np.log(q_rot) + additive_term)
def calc_igm_s_vib(molecule, temp):
    """
    Vibrational entropy of a molecule within the Ideal Gas Model (IGM)
    RRHO method.

    :param molecule: (otherm.Molecule)
    :param temp: (float) Temperature in K
    :return: (float) S_vib in J K-1 mol-1
    """
    mode_entropies = []
    for freq in molecule.real_vib_freqs():
        # Dimensionless vibrational temperature ratio: x = h*nu / (k_B T)
        x = freq * Constants.c_in_cm * Constants.h / (Constants.k_b * temp)
        mode_entropies.append(
            Constants.r * ((x / (np.exp(x) - 1.0)) - np.log(1.0 - np.exp(-x))))
    return sum(mode_entropies, 0.0)
def calc_truhlar_s_vib(molecule, temp, shift_freq):
    """
    Vibrational entropy using Truhlar's method: every real frequency below
    ``shift_freq`` is raised to that value before the harmonic-oscillator
    entropy is summed.

    :param molecule: (otherm.Molecule)
    :param temp: (float) Temperature in K
    :param shift_freq: (float) Shift all frequencies up to this value
    :return: (float) S_vib in J K-1 mol-1
    """
    total = 0
    for freq in molecule.real_vib_freqs():
        # Threshold lower bound of the frequency
        effective_freq = max(freq, shift_freq)
        # Vibrational temperature of the (possibly shifted) mode
        x = effective_freq * Constants.c_in_cm * Constants.h / Constants.k_b
        ratio = x / temp
        total += Constants.r * ((ratio / (np.exp(ratio) - 1.0)) -
                                np.log(1.0 - np.exp(-ratio)))
    return total
def calc_grimme_s_vib(molecule, temp, omega_0, alpha):
"""
Calculate the entropy according to Grimme's qRRHO method of RR-HO
interpolation in Chem. Eur. J. 2012, 18, 9955
:param molecule: (otherm.Molecule)
:param temp: (float) Temperature in K
:param omega_0: (float) ω0 parameter
:param alpha: (float) α parameter
:return: (float) S_vib in J K-1 mol-1
"""
s = 0.0
i_mat = calc_moments_of_inertia(molecule.xyzs)
# Average I = (I_xx + I_yy + I_zz) / 3.0
b_avg = np.trace(i_mat) / 3.0
for freq in molecule.real_vib_freqs():
omega = freq * Constants.c_in_cm
mu = Constants.h / (8.0 * np.pi**2 * omega)
mu_prime = (mu * b_avg) / (mu + b_avg)
x = freq | |
# Created by fshaw at 13/09/2018
from dal.copo_da import Submission, DataFile
import requests, os
import json
import traceback
from bson import ObjectId
from urllib.request import quote
from collections import namedtuple
from web.apps.web_copo.schemas.utils.cg_core.cg_schema_generator import CgCoreSchemas
from web.apps.web_copo.schemas.utils.data_utils import get_base_url
from urllib.parse import urljoin
from dal import cursor_to_list
from submission.helpers import generic_helper as ghlper
class DspaceSubmit:
def __init__(self, submission_id=str()):
    """
    :param submission_id: (str) id of the submission record to process
    """
    self.submission_id = submission_id
    # Repository connection details - populated in submit() from the
    # submission's linked repository record.
    self.host = None
    self.username = None
    # BUG FIX: the source contained the literal placeholder `<PASSWORD>`
    # (a syntax error); initialise to None like the other credentials.
    self.password = None
    # Set after a successful authentication
    self.login_details = None
    self.dspace_type = None
def submit(self):
    """
    Manages the submission of objects to a DSpace repository (the original
    docstring said "ckan" - this class talks to DSpace).

    Looks up the submission record, authenticates against the target
    repository, then dispatches to the item-create or item-submit pathway.

    :return: dict with 'status' and 'message' keys
    """
    if not self.submission_id:
        return dict(status=False, message='Submission identifier not found!')
    # retrieve submission record from db
    # specify filtering
    filter_by = dict(_id=ObjectId(str(self.submission_id)))
    # specify projection
    query_projection = {
        "_id": 1,
        "repository_docs.url": 1,
        "repository_docs.username": 1,
        "repository_docs.password": 1,
        "meta.type": 1,
        "meta.params": 1,
        "complete": 1
    }
    doc = Submission().get_collection_handle().aggregate(
        [
            # destination_repo is stored as a string - convert it so the
            # $lookup below can join against RepositoryCollection._id
            {"$addFields": {
                "destination_repo_converted": {
                    "$convert": {
                        "input": "$destination_repo",
                        "to": "objectId",
                        "onError": 0
                    }
                }
            }
            },
            {
                "$lookup":
                    {
                        "from": "RepositoryCollection",
                        "localField": "destination_repo_converted",
                        "foreignField": "_id",
                        "as": "repository_docs"
                    }
            },
            {
                "$project": query_projection
            },
            {
                "$match": filter_by
            }
        ])
    records = cursor_to_list(doc)
    # get submission record
    try:
        submission_record = records[0]
    except Exception:
        ghlper.logging_error(traceback.format_exc(), self.submission_id)
        message = "Submission record not found. Please try resubmitting."
        ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
        return dict(status='error', message=message)
    try:
        repository_info = submission_record['repository_docs'][0]
    except Exception as ex:
        ghlper.logging_error(traceback.format_exc(), self.submission_id)
        error_type = type(ex).__name__
        message = f"Couldn't retrieve repository information due to the following error: '{error_type}'"
        ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
        return dict(status='error', message=message)
    if str(submission_record.get("complete", False)).lower() == 'true':
        message = 'Submission is marked as complete!'
        ghlper.logging_info(message, self.submission_id)
        ghlper.update_submission_status(status='success', message=message, submission_id=self.submission_id)
        return dict(status=True, message=message)
    # set submission parameters
    self.host = repository_info.get("url", str())
    self.username = repository_info.get("username", str())
    self.password = repository_info.get("password", str())
    # authenticate against the repository
    try:
        authentication_result = self._do_dspace_authenticate()
        if authentication_result['status'] is not True:
            return authentication_result
    except Exception:
        user_message = "DSpace Authentication error"  # deliberately vague: avoid exposing login credentials
        ghlper.logging_error(traceback.format_exc(), self.submission_id)
        ghlper.update_submission_status(status='error', message=user_message, submission_id=self.submission_id)
        return dict(status='error', message=user_message)
    login_details, dspace_type = authentication_result['value']
    self.login_details = login_details
    self.dspace_type = dspace_type
    # check submission context and select submission pathway.
    # BUG FIX: this local was named `type`, which made `type` a local name
    # for the whole function and turned the `type(ex).__name__` call in the
    # repository_info except-branch above into an UnboundLocalError.
    submission_type = submission_record.get("meta", dict()).get("type", str())
    params = submission_record.get("meta", dict()).get("params", dict())
    if submission_type == "new":  # create a dataset to submit
        return self._do_item_create_submit(**params)
    if submission_type == "existing":  # a dataset specified proceed to submit
        return self._do_item_submit(**params)
    return dict(status=True, message="No status message provided!")
def _do_dspace_authenticate(self):
    """
    Authenticates against the DSpace repository to facilitate interactions.
    Tries the DSpace v6 REST login first, then falls back to the v5 method.

    :return: dict; on success 'status' is True and 'value' holds the tuple
        (login_details, dspace_type) with dspace_type 6 or 5
    """
    login_url = urljoin(self.host, '/rest/login')
    # try to login using v6 method
    # special characters must be urlencoded (but only for version 6!)
    # TODO(review): the password is NOT quote()'d although the comment above
    # implies it should be - confirm behaviour for passwords containing
    # '&', '=' or '%'.
    param_string = "?email=" + quote(self.username) + "&password=" + self.password
    try:
        response = requests.post(login_url + param_string)
    except Exception:
        message = "DSpace Authentication error"  # deliberately vague: avoid exposing login credentials
        ghlper.logging_error(traceback.format_exc(), self.submission_id)
        return dict(status='error', message=message)
    response_status_code = response.status_code
    if response_status_code != 200:
        # try using v5 method
        # BUG FIX: the password value here was the literal placeholder
        # `<PASSWORD>` (invalid syntax); send the stored credential.
        params = dict(email=self.username, password=self.password)
        try:
            response = requests.post(login_url, json=params)
        except Exception:
            message = "DSpace Authentication error"  # deliberately vague: avoid exposing login credentials
            ghlper.logging_error(traceback.format_exc(), self.submission_id)
            return dict(status='error', message=message)
        response_status_code = response.status_code
        if response_status_code != 200:
            message = f"DSpace Authentication error: '{str(response_status_code)}'"
            ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
            return dict(status='error', message=message)
    # v6 returns a JSESSIONID cookie; v5 returns a token in the body
    try:
        login_details = response.cookies["JSESSIONID"]
        dspace_type = 6
    except KeyError:
        login_details = response.content
        dspace_type = 5
    return dict(status=True, value=(login_details, dspace_type))
def _do_item_submit(self, **params):
    """
    Fulfils a submission given an existing DSpace item identifier.

    :param params: must contain 'identifier' - the id of the target item
    :return: dict with 'status' and 'message' keys
    """
    item_id = params.get("identifier", str())
    if item_id:
        return self._submit_datafiles(item_id=item_id)
    # No item to attach datafiles to - record the problem and bail out
    message = 'Missing item identifier! Please try resubmitting.'
    ghlper.logging_error(message, self.submission_id)
    ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
    return dict(status='error', message=message)
def _do_item_create_submit(self, **params):
"""
function creates a new item to fulfill submission
:return:
"""
# get collection id for which a new item is to be created
collection_id = params.get("identifier", str())
if not collection_id:
message = 'Missing collection identifier! Please try resubmitting.'
ghlper.logging_error(message, self.submission_id)
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
# convert to DSpace metadata
try:
submission_metadata = self._get_submission_metadata()
except Exception as ex:
ghlper.logging_error(traceback.format_exc(), self.submission_id)
message = f"Error converting from CG Core to DSpace: '{str(ex)}'"
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
# create item
call_url = urljoin(self.host, f"/rest/collections/{collection_id}/items")
dspace_meta = dict(metadata=submission_metadata)
if self.dspace_type == 6:
try:
response = requests.post(call_url, json=dspace_meta,
headers={"Content-Type": "application/json",
"accept": "application/json"},
cookies={"JSESSIONID": self.login_details})
except Exception as ex:
ghlper.logging_error(traceback.format_exc(), self.submission_id)
message = f"Error creating DSpace item: '{str(ex)}'"
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
elif self.dspace_type == 5:
try:
response = requests.post(call_url, json=dspace_meta,
headers={"rest-dspace-token": self.login_details,
"Content-Type": "application/json",
"accept": "application/json"})
except Exception as ex:
ghlper.logging_error(traceback.format_exc(), self.submission_id)
message = f"Error creating DSpace item: '{str(ex)}'"
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
if response.status_code == 200:
response_data = response.json()
else:
error_message = response.reason
message = f"Error creating DSpace item.'{error_message}'"
ghlper.logging_error(message, self.submission_id)
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
item_id = response_data.get("id", str()) or response_data.get("uuid", str())
if not item_id:
message = f"Error creating DSpace item. Couldn't obtain item identifier."
ghlper.logging_error(message, self.submission_id)
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
return self._submit_datafiles(item_id=item_id)
    def _get_submission_metadata(self):
        """
        function composes the metadata for a new dataset creation

        Converts the submission's stored CG Core description entries into a
        list of DSpace metadata dicts of the form
        dict(key=..., value=..., language=...).
        :return: list of DSpace metadata dicts
        """
        submission_metadata = list()
        # get user data
        description_metadata = Submission().get_submission_metadata(submission_id=self.submission_id)["meta"]
        # get metadata language: first "dc.language" entry, if any
        lang = [x.get("vals", str()) for x in description_metadata if x.get("dc", str()) == "dc.language"]
        lang = lang[0] if lang else str()
        if isinstance(lang, list):
            # "vals" may itself be a list; use its first element
            lang = lang[0]
        # predefined fields
        try:
            url = get_base_url()
            submission_metadata.append(
                dict(key="dc.relation.ispartof", value=urljoin(url, 'copo/resolve/' + self.submission_id),
                     language=lang))
        except Exception as ex:
            # best-effort: a failure here only drops the ispartof entry
            ghlper.logging_error(traceback.format_exc(), self.submission_id)
        # define a mapping from cgcore to dspace fields
        MetaMap = namedtuple('MetaMap', ['repo', 'cgcore'])
        schema_mappings = [
            MetaMap(repo="dc.contributor.author", cgcore="dc.creator"),
            MetaMap(repo="", cgcore="dc.relation isPartOf"),  # doing this penalises the cgcore field
        ]
        # map defined fields first
        for mapping in schema_mappings:
            target_val = [x for x in description_metadata if x.get("dc", str()) == mapping.cgcore]
            if not target_val:
                continue
            target_dict = target_val[0]
            target_val = target_dict.get("vals", str())
            # remove mapped entry from list so the generic pass below skips it
            description_metadata.remove(target_dict)
            # can't map unspecified repository field
            if not mapping.repo:
                continue
            if isinstance(target_val, str) and target_val.strip() != "":
                submission_metadata.append(dict(key=mapping.repo, value=target_val, language=lang))
            # set one value from the list that isn't empty
            elif isinstance(target_val, list):
                target_val = [x for x in target_val if str(x).strip() != ""]
                if target_val:
                    submission_metadata.append(dict(key=mapping.repo, value=target_val[0], language=lang))
        # now map non-predefined entries
        for target_dict in description_metadata:
            # process key: swap the "dc." prefix for the entry's own prefix
            prefix = target_dict.get("prefix", "dc")
            key = target_dict.get("dc", str()).replace('dc.', f'{prefix}.', 1)
            # collapse internal whitespace to "." (e.g. "dc.relation isPartOf")
            key = '.'.join(key.split())
            # turn ".type=" separators into "." - presumably flattening
            # qualified keys like "x.type=y"; TODO confirm against source data
            key = '.'.join(key.split('.type='))
            key = key.lower()
            # process value
            target_val = target_dict.get("vals", str())
            if isinstance(target_val, str) and target_val.strip() != "":
                submission_metadata.append(dict(key=key, value=target_val, language=lang))
            # set one value from the list that isn't empty
            elif isinstance(target_val, list):
                target_val = [x for x in target_val if str(x).strip() != ""]
                if target_val:
                    submission_metadata.append(dict(key=key, value=target_val[0], language=lang))
        return submission_metadata
def _submit_datafiles(self, item_id=str()):
"""
function uploads files to DSpace given an item
:param item_id:
:return:
"""
submission_record = Submission().get_collection_handle().find_one({'_id': ObjectId(self.submission_id)},
{"bundle_meta": 1})
# get files to upload
datafiles = submission_record.get("bundle_meta", list())
# set post parameters
headers = {"Content-Type": "application/json", "accept": "application/json"}
policy = [{"action": "DEFAULT_*", "epersonId": -1, "groupId": 0, "resourceId": 47166,
"resourceType": "bitstream", "rpDescription": None, "rpName": None, "rpType": "TYPE_INHERITED",
"startDate": None, "endDate": None}]
for df in datafiles:
# # check for already uploaded file
# if str(df.get("upload_status", False)).lower() == 'true':
# continue
file_basename = os.path.basename(df.get("file_path", str()))
filename, file_extension = os.path.splitext(file_basename)
file_extension = file_extension.lstrip(".")
file_mimetype = self.get_media_type_from_file_ext(file_extension)
name = description = filename
bitstream_url = urljoin(self.host,
f"/rest/items/{str(item_id)}/bitstreams?name={name}&description={description}")
bitstream = dict(
name=name,
description=description,
type="bitstream",
format=file_mimetype,
bundleName="ORIGINAL",
policies=policy
)
# request new bitstream
if self.dspace_type == 6:
try:
response = requests.post(bitstream_url, data=bitstream, headers=headers,
cookies={"JSESSIONID": self.login_details})
except Exception as ex:
ghlper.logging_error(traceback.format_exc(), self.submission_id)
message = f"Error obtaining DSpace bitstream: '{str(ex)}'"
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
elif self.dspace_type == 5:
try:
response = requests.post(bitstream_url, json=bitstream,
headers={"rest-dspace-token": self.login_details,
"accept": "application/json"})
except Exception as ex:
ghlper.logging_error(traceback.format_exc(), self.submission_id)
message = f"Error obtaining DSpace bitstream: '{str(ex)}'"
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
if response.status_code == 200:
response_data = response.json()
else:
error_message = response.reason
message = f"Error obtaining DSpace bitstream.'{error_message}'"
ghlper.logging_error(message, self.submission_id)
ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)
return dict(status='error', message=message)
data_id = response_data.get("id", str()) or response_data.get("uuid", str())
if not data_id:
message = "Error uploading datafile. Couldn't obtain bitstream identifier."
ghlper.logging_error(message, self.submission_id)
ghlper.update_submission_status(status='error', message=message, | |
# repo: deepthiinduri/TRACK_THE_COVID, file: symptomanalyser.py
from tkinter import *
from PIL import ImageTk, Image ,ImageDraw, ImageFont, ImageFilter
import tkinter.ttk as ttk
from ttkthemes import ThemedStyle
import webbrowser
import ctypes
def Syptom_analyser():
newWindow = Toplevel()
newWindow.title("SYMPTOM ANALYSER")
newWindow.state('zoomed')
newWindow.iconbitmap(r'C:\Users\DELL\Downloads\coronavirus_image_UXL_icon.ico')
labe1 = Label(newWindow, text = " SYMPTOM ANALYSER " , font = "Times 25 bold roman" , pady = 5, padx = 1550 ,fg = "#EC4D37", bg = "black").pack()
render = ImageTk.PhotoImage(Image.open ("Images/symptoms covid.png").resize((1550,180) , Image.ANTIALIAS))
img = Label(newWindow, image = render, padx = 100)
img.image = render
img.pack()
canvas = Canvas(newWindow, height = 800, width = 1550)
canvas.pack()
label1 = Label(canvas, text = " Age :" ,font = "Times 16 bold roman")
label1_canvas = canvas.create_window(100, 50, window = label1)
def show():
op1 = n1.get()
op2 = n2.get()
op3 = v.get()
op4 = CheckVar1.get()
op5 = CheckVar2.get()
op6 = CheckVar3.get()
op7 = CheckVar4.get()
op8 = CheckVar5.get()
op9 = CheckVar6.get()
op10 = CheckVar7.get()
op11 = CheckVar8.get()
op12 = CheckVar9.get()
total = op5 + op6 + op7 + op8+ op9 + op10 + op11 + op12
MessageBox = ctypes.windll.user32.MessageBoxW
if(op1 and op2 and op3 and (op4 or op5 or op6 or op7 or op8 or op9 or op10 or op11 or op12)):
if(op4==1) and (op5==1 or op6==1 or op7==1 or op8==1 or op9==1 or op10==1 or op11==1 or op12==1):
MessageBox(None, ' Enter only None if there are no symptoms. ', ' Error! ', 0)
elif(op1.isspace() or op2.isspace()):
MessageBox(None, ' Please enter all the fields. ', ' Alert! ', 0)
elif(op3=='2'):
if(op4==1):
label_result.config(text = "No Risk", fg = "SpringGreen2")
elif(total==8):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '72/100' + "\n" + "High Risk", fg = "red2")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '74/100' + "\n" + "High Risk", fg = "red2")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '77/100' + "\n" + "High Risk", fg = "red3")
else:
label_result.config(text = '76/100' + "\n" + "High Risk", fg = "red3")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '80/100' + "\n" + "High Risk", fg = "red3")
else:
label_result.config(text = '79/100' + "\n" + "High Risk", fg = "red3")
elif(total==7):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '62/100' + "\n" + "Medium Risk", fg = "red2")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '64/100' + "\n" + "Medium Risk", fg = "red2")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '66/100' + "\n" + "High Risk", fg = "red2")
else:
label_result.config(text = '65/100' + "\n" + "High Risk", fg = "red2")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '70/100' + "\n" + "High Risk", fg = "red2")
else:
label_result.config(text = '69/100' + "\n" + "High Risk", fg = "red2")
elif(total==6):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '52/100' + "\n" + "Medium Risk", fg = "red")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '54/100' + "\n" + "Medium Risk", fg = "red")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '57/100' + "\n" + "Medium Risk", fg = "red")
else:
label_result.config(text = '56/100' + "\n" + "Medium Risk", fg = "red")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '60/100' + "\n" + "Medium Risk", fg = "red2")
else:
label_result.config(text = '59/100' + "\n" + "Medium Risk", fg = "red")
elif(total==5):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '43/100' + "\n" + "Medium Risk", fg = "green2")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '45/100' + "\n" + "Medium Risk", fg = "red2")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '48/100' + "\n" + "Medium Risk", fg = "red2")
else:
label_result.config(text = '47/100' + "\n" + "Medium Risk", fg = "red2")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '50/100' + "\n" + "Medium Risk", fg = "red2")
else:
label_result.config(text = '49/100' + "\n" + "Medium Risk", fg = "red2")
elif(total==4):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '35/100' + "\n" + "Low Risk", fg = "green2")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '36/100' + "\n" + "Low Risk", fg = "green2")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '38/100' + "\n" + "Low Risk", fg = "green2")
else:
label_result.config(text = '39/100' + "\n" + "Low Risk", fg = "green2")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '41/100' + "\n" + "Medium Risk", fg = "green2")
else:
label_result.config(text = '40/100' + "\n" + "Medium Risk", fg = "green2")
elif(total==3):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '25/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '28/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '31/100' + "\n" + "Low Risk", fg = "green2")
else:
label_result.config(text = '30/100' + "\n" + "Low Risk", fg = "green2")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '33/100' + "\n" + "Low Risk", fg = "green2")
else:
label_result.config(text = '32/100' + "\n" + "Low Risk", fg = "green2")
elif(total==2):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '16/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '18/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '21/100' + "\n" + "Medium Risk", fg = "green3")
else:
label_result.config(text = '20/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '23/100' + "\n" + "Low Risk", fg = "green3")
else:
label_result.config(text = '22/100' + "\n" + "Low Risk", fg = "green3")
elif(total==1):
if(op1 == " Below 5 years " or op1 == " 5 - 17 years "):
label_result.config(text = '12/100' + "\n" + "Very Low Risk", fg = "green4")
elif(op1 == " 18 - 30 years "):
label_result.config(text = '15/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " 31 - 60 years "):
if(op2 == " Female "):
label_result.config(text = '18/100' + "\n" + "Medium Risk", fg = "green3")
else:
label_result.config(text = '17/100' + "\n" + "Low Risk", fg = "green3")
elif(op1 == " Above 60 years "):
if(op2 == " Female "):
label_result.config(text = '21/100' + "\n" + "Medium Risk", fg = "green3")
else:
label_result.config(text = '20/100' + "\n" + "Low Risk", fg = "green3")
elif(op3=='1'):
if(op4==1):
label_result.config(text = '20/100' + "\n" + "Low Risk", fg = "green3")
elif(total==8):
if(op1 == " Below 5 years "):
label_result.config(text = '95/100' + "\n" + "Extreme Risk", fg = "red4")
elif(op1 == " 5 - 17 years "):
label_result.config(text = '96/100' + "\n" + "Extreme Risk", fg = "red4")
elif(op1 == " 18 - 30 years "):
if(op2 == " Female "):
label_result.config(text = '97/100' + "\n" + "Extreme Risk", fg = "red4")
else:
label_result.config(text = '96.5/100' + "\n" + "Extreme Risk", fg = "red4")
elif(op1 == " 31 - | |
# Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constrained Quadratic Model class.
"""
import collections.abc as abc
import json
import re
import tempfile
import uuid
import warnings
import zipfile
from numbers import Number
from typing import Hashable, Optional, Set, Union, BinaryIO, ByteString, Iterable, Collection, Dict
from typing import Callable, MutableMapping, Iterator, Tuple, Mapping, Any

import numpy as np

from dimod.core.bqm import BQM as BQMabc
from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm
from dimod.discrete.discrete_quadratic_model import DiscreteQuadraticModel
from dimod.quadratic import QuadraticModel
from dimod.sym import Comparison, Eq, Le, Ge, Sense
from dimod.serialization.fileview import SpooledTemporaryFile, _BytesIO
from dimod.serialization.fileview import load, read_header, write_header
from dimod.typing import Bias, Variable
from dimod.utilities import new_label
from dimod.variables import Variables, serialize_variable, deserialize_variable
from dimod.vartypes import Vartype, as_vartype, VartypeLike
__all__ = ['ConstrainedQuadraticModel', 'CQM', 'cqm_to_bqm']
CQM_MAGIC_PREFIX = b'DIMODCQM'
class ConstrainedQuadraticModel:
r"""A constrained quadratic model.
Constrained quadratic models are problems of the form:
.. math::
\begin{align}
\text{Minimize an objective:} & \\
& \sum_{i} a_i x_i + \sum_{i<j} b_{ij} x_i x_j + c, \\
\text{Subject to constraints:} & \\
& \sum_i a_i^{(c)} x_i + \sum_{i<j} b_{ij}^{(c)} x_i x_j+ c^{(c)} \le 0,
\quad c=1, \dots, C_{\rm ineq.}, \\
& \sum_i a_i^{(d)} x_i + \sum_{i<j} b_{ij}^{(d)} x_i x_j + c^{(d)} = 0,
\quad d=1, \dots, C_{\rm eq.},
\end{align}
where :math:`\{ x_i\}_{i=1, \dots, N}` can be binary or integer
variables, :math:`a_{i}, b_{ij}, c` are real values and
:math:`C_{\rm ineq.}, C_{\rm eq,}` are the number of inequality and
equality constraints respectively.
The objective and constraints are encoded as either :class:`.QuadraticModel`
or :class:`.BinaryQuadraticModel` depending on the variable types used.
Example:
Solve a simple `bin packing problem <https://w.wiki/3jz4>`_. In this
problem we wish to pack a set of items of different weights into
the smallest number of bins possible.
See :func:`~dimod.generators.bin_packing` for a general function to
generate bin packing problems. We follow the same naming conventions
in this example.
Let's start with four object weights and assume that each bin has a
capacity of 1.
>>> weights = [.9, .7, .2, .1]
>>> capacity = 1
Let :math:`y_j` indicate that we used bin :math:`j`. We know that we
will use four or fewer total bins.
>>> y = [dimod.Binary(f'y_{j}') for j in range(len(weights))]
Let :math:`x_{i,j}` indicate that we put item :math:`i` in bin
:math:`j`.
>>> x = [[dimod.Binary(f'x_{i}_{j}') for j in range(len(weights))]
... for i in range(len(weights))]
Create an empty constrained quadratic model with no objective or
constraints.
>>> cqm = dimod.ConstrainedQuadraticModel()
We wish to minimize the number of bins used. Therefore our objective
is to minimize the value of :math:`\sum_j y_j`.
>>> cqm.set_objective(sum(y))
We also need to enforce the constraint that each item can only go
in one bin. We can express this constraint, for a given item :math:`i`,
with :math:`\sum_j x_{i, j} == 1`. Note that the label of each
constraint is returned so that we can access them in the future if
desired.
>>> for i in range(len(weights)):
... cqm.add_constraint(sum(x[i]) == 1, label=f'item_placing_{i}')
'item_placing_0'
'item_placing_1'
'item_placing_2'
'item_placing_3'
Finally, we need to enforce the limits on each bin. We can express
this constraint, for a given bin :math:`j`, with
:math:`\sum_i x_{i, j} * w_i <= c` where :math:`w_i` is the weight
of item :math:`i` and :math:`c` is the capacity.
>>> for j in range(len(weights)):
... cqm.add_constraint(
... sum(weights[i] * x[i][j] for i in range(len(weights))) - y[j] * capacity <= 0,
... label=f'capacity_bin_{j}')
'capacity_bin_0'
'capacity_bin_1'
'capacity_bin_2'
'capacity_bin_3'
"""
def __init__(self):
# discrete variable tracking, we probably can do this with less memory
# but for now let's keep it simple
self.discrete: Set[Hashable] = set() # collection of discrete constraints
self._discrete: Set[Variable] = set() # collection of all variables used in discrete
self._objective = QuadraticModel()
@property
def constraints(self) -> Dict[Hashable, Comparison]:
"""The constraints as a dictionary.
This dictionary and its contents should not be modified.
"""
try:
return self._constraints
except AttributeError:
pass
self._constraints: Dict[Hashable, Comparison] = {}
return self._constraints
    @property
    def objective(self) -> QuadraticModel:
        """The objective to be minimized.

        Returns the underlying model object itself (not a copy), so
        mutating the returned model mutates the CQM's objective.
        """
        return self._objective
    @property
    def variables(self) -> Variables:
        """The variables in use over the objective and all constraints.

        The objective's Variables object is shared (not copied), cached on
        first access, and annotated with deprecated helper attributes.
        """
        # EAFP cache: build and annotate the Variables object only once
        try:
            return self._variables
        except AttributeError:
            pass
        self._variables = variables = self.objective.variables
        # to support backwards compatibility (0.10.0 - 0.10.5), we annotate
        # this object with some attributes. All of these will be removed in
        # 0.11.0
        def vartype(v):
            # deprecated shim: forwards to the model-level vartype lookup
            warnings.warn(
                "cqm.variables.vartype(v) is deprecated and will be removed in dimod 0.11.0, "
                "use cqm.vartype(v) instead.", DeprecationWarning, stacklevel=2)
            return self.vartype(v)
        variables.vartype = vartype  # method
        # _Vartypes/_LowerBounds/_UpperBounds are view classes defined
        # elsewhere in this module - presumably lazy per-variable mappings;
        # TODO confirm their definitions
        variables.vartypes = _Vartypes(self)
        variables.lower_bounds = _LowerBounds(self)
        variables.upper_bounds = _UpperBounds(self)
        return variables
    def _add_variables_from(self, model: Union[BinaryQuadraticModel, QuadraticModel]):
        """Register every variable of *model* with the objective.

        Args:
            model: A (binary) quadratic model whose variables (with their
                vartypes and, for QuadraticModel, bounds) are added.

        Raises:
            TypeError: If *model* is not a supported model type.
        """
        # todo: singledispatchmethod in 3.8+
        # NOTE: the BQM check must precede the QuadraticModel check so that
        # legacy BQMabc instances take the single-vartype path
        if isinstance(model, (BinaryQuadraticModel, BQMabc)):
            # a BQM has one vartype shared by all of its variables
            vartype = model.vartype
            for v in model.variables:
                self.objective.add_variable(vartype, v)
        elif isinstance(model, QuadraticModel):
            for v in model.variables:
                # for spin, binary variables the bounds are ignored anyway
                self.objective.add_variable(model.vartype(v), v,
                                            lower_bound=model.lower_bound(v),
                                            upper_bound=model.upper_bound(v))
        else:
            raise TypeError("model should be a QuadraticModel or a BinaryQuadraticModel")
def add_constraint(self, data, *args, **kwargs) -> Hashable:
"""A convenience wrapper for other methods that add constraints.
Examples:
>>> from dimod import ConstrainedQuadraticModel, Integers
>>> i, j = Integers(['i', 'j'])
>>> cqm = ConstrainedQuadraticModel()
>>> cqm.add_constraint(i + j <= 3, label='Constrained i-j range')
'Constrained i-j range'
See also:
:meth:`~.ConstrainedQuadraticModel.add_constraint_from_model`
:meth:`~.ConstrainedQuadraticModel.add_constraint_from_comparison`
:meth:`~.ConstrainedQuadraticModel.add_constraint_from_iterable`
"""
# in python 3.8+ we can use singledispatchmethod
if isinstance(data, (BinaryQuadraticModel, QuadraticModel, BQMabc)):
return self.add_constraint_from_model(data, *args, **kwargs)
elif isinstance(data, Comparison):
return self.add_constraint_from_comparison(data, *args, **kwargs)
elif isinstance(data, Iterable):
return self.add_constraint_from_iterable(data, *args, **kwargs)
else:
raise TypeError("unexpected data format")
    def add_constraint_from_model(self,
                                  qm: Union[BinaryQuadraticModel, QuadraticModel],
                                  sense: Union[Sense, str],
                                  rhs: Bias = 0,
                                  label: Optional[Hashable] = None,
                                  copy: bool = True) -> Hashable:
        """Add a constraint from a quadratic model.

        Args:
            qm: A quadratic model or binary quadratic model.
            sense: One of '<=', '>=', '=='.
            rhs: The right hand side of the constraint.
            label: A label for the constraint. Must be unique. If no label
                is provided, then one is generated using :mod:`uuid`.
            copy: If `True`, the BQM is copied. This can be set to `False` to
                improve performance, but subsequently mutating the bqm can
                cause issues.

        Returns:
            The label of the added constraint.

        Raises:
            ValueError: If *label* is already in use.

        Examples:
            >>> from dimod import ConstrainedQuadraticModel, Binary
            >>> cqm = ConstrainedQuadraticModel()
            >>> x = Binary('x')
            >>> cqm.add_constraint_from_model(x, '>=', 0, 'Min x')
            'Min x'
        """
        # accessing the property ensures the cached Variables view exists;
        # the local binding itself is otherwise unused
        variables = self.variables
        # get sense as an enum
        if isinstance(sense, str):
            sense = Sense(sense)
        if label is None:
            # we support up to 100k constraints and :6 gives us 16777216
            # possible so pretty safe
            label = uuid.uuid4().hex[:6]
            while label in self.constraints:
                label = uuid.uuid4().hex[:6]
        elif label in self.constraints:
            raise ValueError("a constraint with that label already exists")
        if isinstance(qm, BQMabc):
            qm = as_bqm(qm)  # handle legacy BQMs
        # register the constraint's variables (vartypes/bounds) on the
        # objective before storing the constraint
        self._add_variables_from(qm)
        if copy:
            qm = qm.copy()
        if sense is Sense.Le:
            self.constraints[label] = Le(qm, rhs)
        elif sense is Sense.Ge:
            self.constraints[label] = Ge(qm, rhs)
        elif sense is Sense.Eq:
            self.constraints[label] = Eq(qm, rhs)
        else:
            raise RuntimeError("unexpected sense")
        return label
def add_constraint_from_comparison(self,
comp: Comparison,
label: Optional[Hashable] = None,
copy: bool = True) -> Hashable:
"""Add a constraint from a comparison.
Args:
comp: A comparison object.
label: A label for the constraint. Must be unique. If no label
is provided, one is generated using :mod:`uuid`.
copy: If `True`, the model is copied. You can set to `False` to
improve performance, but subsequently mutating the model can
cause issues.
Returns:
Label of the added constraint.
Examples:
>>> from dimod import ConstrainedQuadraticModel, Integer
>>> i = Integer('i')
>>> cqm = ConstrainedQuadraticModel()
>>> cqm.add_constraint_from_comparison(i <= 3, label='Max i')
'Max i'
"""
if not isinstance(comp.rhs, Number):
raise TypeError("comparison should have a numeric rhs")
if isinstance(comp.lhs, (BinaryQuadraticModel, QuadraticModel)):
return self.add_constraint_from_model(comp.lhs, comp.sense, rhs=comp.rhs,
label=label, copy=copy)
else:
raise ValueError("comparison should have a binary quadratic model "
"or quadratic | |
# repo: AERPAW-Platform-Control/gateway
# Copyright (c) 2014-2018 Barnstormer Softworks, Ltd.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import sys
import inspect
from .core import AM, APIRegistry
class HostPOAs(object):
    """Operational actions (POAs) that target host containers."""

    def __init__(self, vtsam):
        # handle on the owning VTS aggregate wrapper
        self.am = vtsam

    def _coerce(self, client_ids):
        """Normalize a single client-id into a one-element list."""
        return client_ids if isinstance(client_ids, list) else [client_ids]

    def getARPTable(self, context, sname, client_ids):
        """Fetch the ARP table of each requested host."""
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "api:uh.host:get-arp-table",
                                  options={"client-ids": self._coerce(client_ids)})

    def getRouteTable(self, context, sname, client_ids):
        """Fetch the route table of each requested host."""
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "api:uh.host:get-route-table",
                                  options={"client-ids": self._coerce(client_ids)})

    def svcStatus(self, context, sname, client_ids):
        """Fetch supervisor service status for each requested host."""
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "api:uh.host:supervisor-status",
                                  options={"client-ids": self._coerce(client_ids)})

    def execcmd(self, context, sname, client_ids, cmd):
        """Run *cmd* on each requested host via the exec POA."""
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "api:uh.host:exec",
                                  options={"client-ids": self._coerce(client_ids), "cmd": cmd})
class v4RouterPOAs(object):
    """Operational actions (POAs) that target IPv4 router containers."""

    def __init__(self, vtsam):
        # handle on the owning VTS aggregate wrapper
        self.am = vtsam

    def _coerce(self, client_ids):
        """Normalize a single client-id into a one-element list."""
        return client_ids if isinstance(client_ids, list) else [client_ids]

    def addOSPFNetworks(self, context, sname, client_ids, nets):
        """Add OSPF Networks to areas on the given routers

        Args:
            context: geni-lib context
            sname (str): Slice name
            client_ids (list): A list of client-id strings
            nets (list): A list of (network, area) tuples
        """
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "vts:uh.quagga:add-ospf-nets",
                                  options={"client-ids": self._coerce(client_ids),
                                           "networks": nets})

    def getRouteTable(self, context, sname, client_ids):
        """Fetch the route table of each requested router."""
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "vts:uh.quagga:get-route-table",
                                  options={"client-ids": self._coerce(client_ids)})

    def getOSPFNeighbors(self, context, sname, client_ids):
        """Fetch the OSPF neighbor list of each requested router."""
        return self.am._apiv3.poa(context, self.am.urlv3, sname,
                                  "vts:uh.quagga:get-ospf-neighbors",
                                  options={"client-ids": self._coerce(client_ids)})
class Policy(object):
    """Policy consent hooks for GDPR-style compliance."""

    def __init__(self, vtsam):
        # handle on the owning VTS aggregate wrapper
        self.am = vtsam

    def getText(self, context, pid=None):
        """Get the text contents of the policy requested. If no policy is
        specified and only one policy exists at the aggregate, that policy
        text will be returned.

        Args:
            context: geni-lib context
            pid: policy ID (typically from `getversion` output)

        Returns:
            str: Text contents of policy
        """
        opts = {"policy-id": pid} if pid else {}
        return self.am._apiv3.paa(context, self.am.urlv3, "vts:policy:get-text", options=opts)

    def giveConsent(self, context, pid):
        """Give consent to the policy indicated for the user URN in the credential used.

        Args:
            context: geni-lib context
            pid: policy ID
        """
        return self.am._apiv3.paa(context, self.am.urlv3, "vts:policy:consent",
                                  options={"policy-id": pid})

    def revokeConsent(self, context, pid):
        """Revoke consent from this date forward to the policy indicated for the
        user URN in the credential used.

        Args:
            context: geni-lib context
            pid: policy ID
        """
        return self.am._apiv3.paa(context, self.am.urlv3, "vts:policy:revoke",
                                  options={"policy-id": pid})
class VTS(AM):
"""Wrapper for all VTS-supported AMAPI functions"""
    def __init__ (self, name, host, url = None):
        """Initialize the VTS aggregate wrapper.

        Args:
            name (str): Human-readable aggregate name
            host (str): Aggregate hostname; used to build the default URL
            url (str): Optional explicit AMAPIv2 endpoint URL
        """
        self._host = host
        if url is None:
            url = "https://%s:3626/foam/gapi/2" % (self._host)
        # build the v3 endpoint by replacing the trailing version character
        # of the v2 URL (".../2") with "3"
        self.urlv3 = "%s3" % (url[:-1])
        self._apiv3 = APIRegistry.get("amapiv3")
        super(VTS, self).__init__(name, url, "amapiv2", "vts")
        # attach grouped POA helpers for hosts, routers, and policy calls
        self.Host = HostPOAs(self)
        self.IPv4Router = v4RouterPOAs(self)
        self.Policy = Policy(self)
def allocate (self, context, sname, rspec):
rspec_data = rspec.toXMLString(ucode=True)
manifest = self._apiv3.allocate(context, self.urlv3, sname, rspec_data)
return self.amtype.parseManifest(manifest)
def provision (self, context, sname):
udata = []
for user in context._users:
data = {"urn" : user.urn, "keys" : [open(x, "rb").read() for x in user._keys]}
udata.append(data)
res = self._apiv3.provision(context, self.urlv3, sname, options = {"geni_users" : udata})
if res["code"]["geni_code"] == 0:
return self.amtype.parseManifest(res["value"])
def changeController (self, context, sname, url, datapaths, ofver=None):
options={"controller-url" : url, "datapaths" : datapaths}
if ofver:
options["openflow-version"] = ofver
return self._apiv3.poa(context, self.urlv3, sname, "vts:of:change-controller", options)
def dumpFlows (self, context, sname, datapaths, show_hidden=False):
"""Get the current flows and flow stats from the requested datapaths.
Args:
context: geni-lib context
sname (str): Slice name
datapaths (list): A list of datapath client_id strings
show_hidden (bool): Show hidden flows (if any)
Returns:
dict: Key/Value dictionary of format `{ client_id : [(flow_field, ...), ...] }`
"""
return self._apiv3.poa(context, self.urlv3, sname, "vts:of:dump-flows",
options={"datapaths" : datapaths, "show-hidden" : show_hidden})
def getL2Table (self, context, sname, client_ids):
if not isinstance(client_ids, list): client_ids = [client_ids]
return self._apiv3.poa(context, self.urlv3, sname, "api:l2-switch:get-l2-table",
options={"client-ids" : client_ids})
def clearL2Table (self, context, sname, client_ids):
if not isinstance(client_ids, list): client_ids = [client_ids]
return self._apiv3.poa(context, self.urlv3, sname, "api:uh.vswitch:clear-l2-table",
options={"client-ids" : client_ids})
def clearFlows (self, context, sname, datapaths):
  """Remove every installed flow from the requested datapaths.

  Args:
    context: geni-lib context
    sname (str): Slice name
    datapaths (list): Datapath client_id strings
  """
  opts = {"datapaths" : datapaths}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:of:clear-flows", options=opts)
def portDown (self, context, sname, client_id):
  """Administratively bring the given port down."""
  opts = {"port-client-id" : client_id}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:port-down", options=opts)
def portUp (self, context, sname, client_id):
  """Administratively bring the given port up."""
  opts = {"port-client-id" : client_id}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:port-up", options=opts)
def addFlows (self, context, sname, flows):
  """Install the given OpenFlow rules on this sliver's datapaths."""
  opts = {"rules" : flows}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:of:add-flows", options=opts)
def getSTPInfo (self, context, sname, datapaths):
  """Return spanning-tree (STP) state for the requested datapaths."""
  if not isinstance(datapaths, list):
    datapaths = [datapaths]
  opts = {"datapaths" : datapaths}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:l2:stp-info", options=opts)
def getRSTPInfo (self, context, sname, datapaths):
  """Return rapid spanning-tree (RSTP) state for the requested datapaths."""
  if not isinstance(datapaths, list):
    datapaths = [datapaths]
  opts = {"datapaths" : datapaths}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:l2:rstp-info", options=opts)
def getPortInfo (self, context, sname, datapaths):
  """Return low-level port information for the requested datapaths."""
  if not isinstance(datapaths, list):
    datapaths = [datapaths]
  opts = {"datapaths" : datapaths}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:raw:get-port-info", options=opts)
def setPortBehaviour (self, context, sname, port_list):
  """Apply behaviour objects to ports.

  Args:
    context: geni-lib context
    sname (str): Slice name
    port_list (list): `(port, behaviour)` pairs; each behaviour must
      provide a `__json__()` serialisation method.
  """
  serialised = [(port, behaviour.__json__()) for (port, behaviour) in port_list]
  return self._apiv3.poa(context, self.urlv3, sname, "vts:raw:set-port-behaviour",
                         options={"ports" : serialised})
def getLeaseInfo (self, context, sname, client_ids):
  """Return DHCP lease information for the requested client ids."""
  if not isinstance(client_ids, list):
    client_ids = [client_ids]
  opts = {"client-ids" : client_ids}
  return self._apiv3.poa(context, self.urlv3, sname, "api:uh.dhcp:get-leases", options=opts)
def setPortVLAN (self, context, sname, port_tuples):
  """Assign access VLANs to ports; each entry is a `(port, vlan)` pair."""
  if not isinstance(port_tuples, list):
    port_tuples = [port_tuples]
  opts = {"ports" : port_tuples}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:raw:set-vlan", options=opts)
def setPortTrunk (self, context, sname, port_list):
  """Configure the given ports as VLAN trunk ports."""
  if not isinstance(port_list, list):
    port_list = [port_list]
  opts = {"ports" : port_list}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:raw:set-trunk", options=opts)
def addSSHKeys (self, context, sname, client_ids, keys):
  """Install the given SSH public keys into the listed containers."""
  if not isinstance(client_ids, list):
    client_ids = [client_ids]
  if not isinstance(keys, list):
    keys = [keys]
  opts = {"client-ids" : client_ids, "ssh-keys" : keys}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:container:add-keys", options=opts)
def setDHCPSubnet (self, context, sname, subnet_tuples):
  """Set the DHCP subnet served on each client id.

  Args:
    context: geni-lib context
    sname (str): Slice name
    subnet_tuples: A `(client_id, subnet)` pair or a list of such pairs.
  """
  if not isinstance(subnet_tuples, list):
    subnet_tuples = [subnet_tuples]
  clid_map = {clid : subnet for (clid, subnet) in subnet_tuples}
  return self._apiv3.poa(context, self.urlv3, sname, "api:uh.dhcp:set-subnet",
                         options = {"client-id-map" : clid_map})
def addDNSResourceRecord (self, context, sname, client_id, record_name, record_type, record_value, record_ttl=7200):
  """Add a DNS resource record to the given DNS root container."""
  opts = {"client-id" : client_id,
          "record-name" : record_name,
          "record-type" : record_type,
          "record-value" : record_value,
          "record-ttl" : record_ttl}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:uh.dnsroot:add-resource-record",
                         options = opts)
def deleteDNSResourceRecord (self, context, sname, client_id, record_name, record_type):
  """Delete a DNS resource record from the given DNS root container."""
  opts = {"client-id" : client_id,
          "record-name" : record_name,
          "record-type" : record_type}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:uh.dnsroot:delete-resource-record",
                         options = opts)
def getAllDNSResourceRecords(self, context, sname, client_ids):
  """Fetch every DNS resource record held by the listed DNS roots."""
  if not isinstance(client_ids, list):
    client_ids = [client_ids]
  opts = {"client-ids": client_ids}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:uh.dnsroot:get-all-records", options=opts)
def getLastDNSDHCPops(self, context, sname, client_ids, number_of_operations, dns_OR_dhcp):
  """Fetch the most recent DNS/DHCP operations recorded by the listed clients."""
  if not isinstance(client_ids, list):
    client_ids = [client_ids]
  opts = {"client-ids": client_ids,
          "number-of-operations": number_of_operations,
          "dns-OR-dhcp": dns_OR_dhcp}
  return self._apiv3.poa(context, self.urlv3, sname, "vts:uh.dnsdhcp:get-last-DNSDHCP-ops",
                         options=opts)
def setDeleteLock (self, context, sname):
  """Place a delete lock on this sliver for the calling user.

  .. note::
    Locks are cumulative and only released via `deletesliver`; the sliver is
    actually removed when the last locking user calls `deletesliver`, so a
    lock cannot be dropped without risking deletion.

  Args:
    context: geni-lib context
    sname (str): Slice name
  """
  action = "geni:set-delete-lock"
  return self._apiv3.poa(context, self.urlv3, sname, action, {})
def dropboxLink (self, context):
  """Begin linking your user_urn to a Dropbox account at this aggregate.

  Args:
    context: geni-lib context

  Returns:
    str: Dropbox authorization URL to open in a web browser.
  """
  action = "vts:dropbox:link-account"
  return self._apiv3.paa(context, self.urlv3, action)
def dropboxFinalize (self, context, authcode):
  """Complete the Dropbox account link for this aggregate.

  Args:
    context: geni-lib context
    authcode (str): Authorization code issued by Dropbox
  """
  action = "vts:dropbox:complete-link"
  return self._apiv3.paa(context, self.urlv3, action, {"auth-code" : authcode})
def dropboxUpload (self, context, sname, cvols):
  """Upload the named container volumes to the linked Dropbox account.

  Args:
    context: geni-lib context
    sname (str): Slice name
    cvols (list): `(container client-id, volume-id)` tuples
  """
  by_container = {}
  for (container_id, volume_id) in cvols:
    by_container.setdefault(container_id, []).append(volume_id)
  # The aggregate expects the per-container volume map wrapped in a
  # single-element list.
  return self._apiv3.poa(context, self.urlv3, sname, "vts:dropbox:upload",
                         options = {"vols" : [by_container]})
def hgPull (self, context, sname, cvols):
"""Update an HgMount volume with the latest data from the source repository.
Args:
context: geni-lib context
sname (str): Slice name
cvols (list): List of `(container client-id, volume-id)` tuples
"""
data = {}
for (cid,volid) in cvols:
data.setdefault(cid, []).append((volid, True))
return self._apiv3.poa(context, | |
in multiple clusters.
# [B, N, K, V, 3]
times_idx = tf.concat([batch_idx, q_idx, head_idx], 5)[:, :, :, :, 0, :]
# [B, q_length, N]
times = tf.scatter_nd(
times_idx, tf.cast(tf.ones_like(closest_q), scattered_prob.dtype),
[B, q_length, N])
times = tf.maximum(1.0, times[:, :, :, None])
out = scattered_prob / times
return out
out = scatter(c_outputs, closest_q, q_length)
out_prob = scatter_atten_prob(c_atten_probs, closest_k, closest_q, k_length,
q_length)
return out, out_prob
class MultiSourceAttention(base_layer.BaseLayer):
  """Batch major attention with multiple source sub-attentions.

  It attends to multiple sources and uses one query as input to generate a
  combined attention context. The dimension of the combined context vector is a
  sum of all source context vectors. Each source attention has its separate
  params and is associated with a source key.
  """

  @classmethod
  def Params(cls):
    """Returns the layer hyperparams."""
    p = super().Params()
    p.Define('source_atten_tpls', None,
             'A list of (source_key, attention_param) pairs.')
    p.Define('input_dim', 0, 'Default key dimension.')
    p.Define('hidden_dim', 0, 'Default hidden dimension.')
    p.Define(
        'primary_source_key', 'source_0', 'Key for the primary source '
        'whose attention probabilities will be used as an output.')
    p.Define('atten_merger_tpl', None,
             'Params to specify how to merge source attention vectors.')
    return p

  def __init__(self, params):
    """Constructs an MultiSourceAttention object."""
    super().__init__(params)
    p = self.params
    # The primary source must be present: its attention probabilities are
    # what FProp returns alongside the merged context.
    assert p.primary_source_key in [
        x for x, _ in p.source_atten_tpls
    ], ('Source attention must have the primary source key.')
    # One child attention per source; children inherit this layer's default
    # dims when their own are unset (<= 0).
    for source_key, atten_p in p.source_atten_tpls:
      child_p = atten_p.Copy()
      if child_p.hidden_dim <= 0:
        child_p.hidden_dim = p.hidden_dim
      if child_p.input_dim <= 0:
        child_p.input_dim = p.input_dim
      self.CreateChild('atten_%s' % source_key, child_p)

    # Initialize source context vector merging layer.
    merger_p = p.atten_merger_tpl.Copy()
    merger_p.name = 'atten_merger'
    merger_p.source_dim = p.input_dim
    merger_p.query_dim = p.input_dim
    self.CreateChild('atten_merger', merger_p)

  def FProp(self,
            theta,
            query_vec,
            key_vec,
            value_vec,
            paddings,
            segment_mask=None,
            per_step_padding=None):
    """Runs every per-source attention and merges the resulting contexts.

    Args:
      theta: A `.NestedMap` of this layer's (and children's) weights.
      query_vec: Query tensor shared by all source attentions.
      key_vec: Mapping from source key to that source's key tensor.
      value_vec: Mapping from source key to that source's value tensor.
      paddings: Mapping from source key to that source's paddings.
      segment_mask: Optional mapping from source key to a segment mask;
        None disables segment masking for every source.
      per_step_padding: Optional padding applied to every source attention.

    Returns:
      A (combined context, primary-source attention probs) tuple, as built
      by `_CombineContext`.
    """
    p = self.params
    with tf.name_scope(self.params.name):
      result_map = py_utils.NestedMap()
      for source_key, _ in p.source_atten_tpls:
        # Each child attention runs with its own weights and its own slice
        # of the per-source inputs; the shared query is used for all.
        result_map[source_key] = (
            self.children['atten_%s' % source_key].FProp(
                theta.get('atten_%s' % source_key), query_vec,
                key_vec[source_key], value_vec[source_key],
                paddings[source_key],
                segment_mask[source_key] if segment_mask else None,
                per_step_padding))
      return self._CombineContext(theta, result_map, query_vec)

  def _CombineContext(self, theta, enc_map, query_vec):
    """Merges per-source (context, probs) pairs into one context vector."""
    encs = enc_map.Flatten()
    combined_enc = (
        self.atten_merger.FProp(theta.atten_merger, [enc for enc, _ in encs],
                                query_vec))
    # Return atten_probs of the primary source.
    return combined_enc, enc_map[self.params.primary_source_key][1]

  def AttenProbs(self,
                 theta,
                 query,
                 key,
                 paddings,
                 segment_mask,
                 per_step_padding=None):
    """Computes attention probabilities using only the primary source."""
    primary_source_key = self.params.primary_source_key
    child_name = 'atten_%s' % primary_source_key
    return self.children[child_name].AttenProbs(
        theta.get(child_name), query, key[primary_source_key],
        paddings[primary_source_key],
        segment_mask[primary_source_key] if segment_mask else None,
        per_step_padding)
class TransformerAttentionLayer(base_layer.BaseLayer):
"""Multiheaded attention sub-layer in Transformer layer.
Input is first normalized using Layer Normalization. Output of layer
normalization is processed using multi-headed attention. And finally, the
output of the attention layer is combined with the residual connection.
  This layer will be used in the following three scenarios:
1. Multi-Headed Self-Attention, where attention keys, values (source_vecs) and
queries come from the same previous layer output.
2. Masked Multi-Headed Self-Attention, where attention keys, values and
queries all come from the same previous layer output, but rightward
activations are masked to prevent information flow from future. This is the
use case for Transformer decoder self-attention layers. Can be activated by
setting is_masked flag of this layer.
3. Multi-Headed Cross-Attention, where attention keys and values
(source_vecs) are coming from a different source (output of the encoder),
and queries coming from the previous layer outputs (decoder).
We use the same capital letters to denote certain tensor parameters as
MultiHeadedAttention class.
B = batch size
S = length of the key/value (source)
T = length of the query (target)
D = model dimension
N = number of attention heads
H = dimensions of each attention head.
"""
@classmethod
def Params(cls):
  """Returns the hyperparams for TransformerAttentionLayer."""
  p = super().Params()
  p.Define('input_dim', 0, 'Dimension of the transformer block input.')
  p.Define('hidden_dim', 0, 'Dimension of the attention hidden dim.')
  p.Define('num_heads', 8, 'Number of attention heads.')
  p.Define(
      'is_masked', False,
      'If set, uses causal non local multiheaded attention.'
      'This option is not valid when atten_tpl is LocalSelfAttention '
      'or its subclass(es).')
  p.Define(
      'atten_dropout_prob', 0.0,
      'Probability at which we apply dropout to the attention probs. '
      'This practically drops memory values at random positions.')
  p.Define(
      'residual_dropout_prob', 0.0,
      'Probability at which we apply dropout to the residual layers, '
      'such that, residual(x, y) = (x + dropout(y)).')
  p.Define('add_unnormalized_input', True,
           'If set, uses unnormalized input in the residual add.')
  p.Define('add_skip_connection', True,
           'If True, add input (or normalized input) to the output.')
  p.Define('ln_tpl', layers.LayerNorm.Params(),
           'Layer norm default params. No layernorm if set to None.')
  p.Define('atten_tpl',
           MultiHeadedAttention.Params().Set(),
           'Multi-Headed Dot-Product Attention default params')
  p.Define(
      'dropout_tpl', layers.DropoutLayer.Params(),
      'Residual dropout params template. keep_prop will be reset to '
      '(1.0 - residual_dropout_prob).')
  return p
@classmethod
def CommonParams(cls,
                 input_dim,
                 num_heads,
                 is_masked=False,
                 use_relative_atten=False,
                 relative_pos_emb_dim=None,
                 local_context=None,
                 left_context=None,
                 right_context=None,
                 dropout_prob=0.):
  # pylint: disable=g-doc-args
  """Returns a hyperparam for the most representative cases.

  CommonParams is not expected to be extended to an omnipotent/generic builder
  method. Specific use cases should take the return value of it and apply
  further customization. It should be kept lean and only extended cautiously
  for very common cases.
  """
  # pylint: enable=g-doc-args
  if not use_relative_atten:
    # relative_pos_emb_dim only makes sense with relative attention.
    assert not relative_pos_emb_dim
  else:
    # Default the relative position embedding dim to the model dim.
    relative_pos_emb_dim = relative_pos_emb_dim or input_dim

  if local_context:
    # local_context is shorthand for a symmetric attention window and is
    # mutually exclusive with explicit left/right bounds.
    assert not left_context and not right_context, (
        'local_context and (left_context, right_context) can not be set '
        'at the same time.')
    left_context = local_context + 1  # include 'self' position.
    right_context = local_context

  p = cls.Params().Set(
      input_dim=input_dim,
      num_heads=num_heads,
      is_masked=is_masked,
      atten_dropout_prob=dropout_prob,
      residual_dropout_prob=dropout_prob)
  # Any context bound selects the local (windowed) attention variants;
  # use_relative_atten further selects the Transformer-XL style classes.
  is_local = left_context or right_context
  if is_local:
    atten_cls = (
        LocalSelfAttentionXL if use_relative_atten else LocalSelfAttention)
  else:
    atten_cls = (
        MultiHeadedAttentionXL
        if use_relative_atten else MultiHeadedAttention)
  p.atten_tpl = atten_cls.Params()
  if use_relative_atten:
    p.atten_tpl.rel_pos_emb_dim = relative_pos_emb_dim
  if is_local:
    p.atten_tpl.Set(left_context=left_context, right_context=right_context)
  return p
def _InitAttentionParams(self, atten_tpl):
  """Returns an initialized transformer attention parameters.

  Copies `atten_tpl` and stamps it with this layer's dims, head count, and
  attention dropout so the child attention matches the transformer block.
  """
  p = self.params
  atten_p = atten_tpl.Copy()
  atten_p.name = 'multihead_atten'
  atten_p.input_dim = p.input_dim
  atten_p.hidden_dim = p.hidden_dim
  atten_p.num_heads = p.num_heads
  atten_p.atten_dropout_prob = p.atten_dropout_prob
  return atten_p
def __init__(self, params):
  """Initializes TransformerAttentionLayer."""
  super().__init__(params)
  p = self.params

  if not p.hidden_dim:
    # Default the attention hidden dim to the block input dim.
    p.hidden_dim = p.input_dim

  # Initialize attention.
  params = self._InitAttentionParams(p.atten_tpl)
  if p.is_masked and issubclass(params.cls, LocalSelfAttention):
    # Local self-attention derives masking from its window; the explicit
    # causal-mask flag is ignored in that case, so warn the caller.
    tf.logging.warn('\'is_masked\' is not effective when used with '
                    'LocalSelfAttention and its subclass(es).')
  self.CreateChild('atten', params)

  # Initialize attention layer normalization.
  if p.ln_tpl:
    params = p.ln_tpl.Copy()
    params.name = 'atten_ln'
    params.input_dim = p.input_dim
    self.CreateChild('layer_norm', params)

  # Initialize residual dropout.
  dropout_tpl = p.dropout_tpl.Copy()
  dropout_tpl.keep_prob = (1.0 - p.residual_dropout_prob)
  self.CreateChild('residual_dropout', dropout_tpl)
def FProp(self,
          theta,
          query_vec,
          source_vecs,
          paddings,
          per_step_padding_override=None,
          segment_mask=None):
  """Compute the result of Transformer attention layer.

  Args:
    theta: A `.NestedMap` object containing weights' values of this layer and
      its children layers.
    query_vec: [B, T, D].
    source_vecs: [B, S, D] (cross_attention) or None (self-attention).
    paddings: [B, S].
    per_step_padding_override: [B, T, T] for self attention or [B, T, S] for
      cross attention.
    segment_mask: [B, 1, T, S].

  Returns:
    output: [B, T, D].
    atten_probs: [B, N, T, S].
  """
  p = self.params
  b, t, _ = py_utils.GetShape(query_vec, 3)
  # Keep the pre-layernorm input around for the residual add
  # (used when p.add_unnormalized_input is set).
  unnormalized_query_vec = query_vec

  # Layer normalization.
  if p.ln_tpl:
    query_vec = self.layer_norm.FProp(theta.layer_norm, query_vec)

  # For self-attention: keys = queries.
  if source_vecs is None:
    source_vecs = query_vec

  # Generates mask, with shape [b, t, t].
  if per_step_padding_override is None:
    if p.is_masked and segment_mask is None:
      # causal padding.
      per_step_padding = tf.tile(
          tf.expand_dims(CausalPadding(t, dtype=query_vec.dtype), 0),
          [b, 1, 1])
    else:
      per_step_padding = None
  else:
    per_step_padding = per_step_padding_override

  # Multiheaded attention.
  with tf.name_scope('atten'):
    ctx_vec, atten_probs = self.atten.FProp(
        theta.atten,
        query_vec,  # query
        source_vecs,  # key
        source_vecs,  # value
        paddings,
        segment_mask=segment_mask,
        per_step_padding=per_step_padding)

  # Residual connection.
  ctx_vec = self.residual_dropout.FProp(theta.residual_dropout, ctx_vec)
  input_to_add = (
      unnormalized_query_vec if p.add_unnormalized_input else query_vec)
  if p.add_skip_connection:
    ctx_vec += input_to_add
  return ctx_vec, atten_probs
def InitStates(self, theta, target_batch_size, target_max_length):
  """Initializes decoding states by delegating to the wrapped attention.

  Args:
    theta: A `.NestedMap` of layer weights; `theta.atten` is forwarded.
    target_batch_size: Batch size of the decode targets.
    target_max_length: Maximum decode sequence length.

  Returns:
    Whatever the child attention's `InitStates` returns.
  """
  atten_theta = theta.atten
  return self.atten.InitStates(atten_theta, target_batch_size,
                               target_max_length)
def ExtendStep(self,
theta,
query_vec,
cached_states,
time_step,
use_short_seq_opt=False):
"""Compute the result and update cached states for the current step.
This function is used by autoregressive decoding. This function knows the
length of full sequence, thus it is different from StreamingExtendStep.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [B, 1, D]
cached_states: A `.NestedMap` object containing tensors which are the
results of previous attentions, used for fast decoding. key - [T, B,
N, H]. value - [T, B, N, H].
time_step: A scalar or tensor with [B], current decode step, 0-based.
if it's a scalar, all the time step are the same decode step.
if it's a tensor, it represents current decode step for | |
and RingFileName is None:
warning(" No file would be exported")
pass
array, columns = ReadFile(FilePath=InputFileName, header=0, get_values=True, get_columns=True)
path, forwardArray, reversedPath, reversedArray = \
getRingAttachedBondsDatabase(database=array, MoleculeCol=MoleculeCol, BondIdxCol=BondIdxCol,
aromaticOnly=aromaticOnly)
forwardDataframe = pd.DataFrame(data=forwardArray, columns=columns)
if RingFileName is not None:
ExportFile(DataFrame=forwardDataframe, FilePath=RingFileName)
reversedDataframe = pd.DataFrame(data=reversedArray, columns=columns)
if NonRingFileName is not None:
ExportFile(DataFrame=reversedDataframe, FilePath=NonRingFileName)
return path, forwardDataframe, reversedPath, reversedDataframe
def constraintDataset(data: Union[pd.DataFrame, ndarray], limitTest: Union[int, float, List, Tuple] = None) \
        -> Union[pd.DataFrame, ndarray]:
    """Restricts `data` to the rows selected by `limitTest`.

    Args:
        data: Source table (pandas DataFrame or numpy ndarray).
        limitTest: Row selector. None keeps everything; a float in (0, 1] is
            interpreted as a fraction of rows taken from the top; an int is a
            head row count; a 2-tuple is a (start, stop) row window; a list is
            an explicit collection of row indices to keep.

    Returns:
        The constrained DataFrame/ndarray (same type as `data`).
    """
    if limitTest is None:
        return data

    name: str = 'limitTest'
    inputFullCheck(value=limitTest, name=name, dtype='int-float-List-Tuple-None',
                   delimiter='-', fastCheck=True)
    if inputFastCheck(value=limitTest, dtype='float'):
        # Fractional spec: validate it lies in (0, 1], then convert to a count.
        inputCheckRange(value=limitTest, name=name, maxValue=1, allowFloatInput=True, rightBound=True)
        if limitTest == 1 or limitTest == 1.0:
            limitTest = data.shape[0]
        else:
            limitTest: int = int(limitTest * data.shape[0])

    # Keeping every row: nothing to drop.  (limitTest cannot be None here —
    # the early return above already handled that case.)
    if limitTest is None or (inputFastCheck(value=limitTest, dtype='int') and limitTest == data.shape[0]):
        return data

    if inputFastCheck(value=limitTest, dtype='int-Tuple', delimiter='-'):
        inputCheckRange(value=limitTest, name=name, minValue=0, maxValue=data.shape[0],
                        leftBound=False, rightBound=False)
        if isinstance(limitTest, Tuple):
            inputCheckIterableInRange(value=limitTest, name=name, maxValue=data.shape[0], maxInputInside=2)
            # A (0, stop) window is equivalent to a plain head count.
            if min(limitTest) == 0:
                limitTest = max(limitTest)

        if isinstance(limitTest, int):
            # Head count: drop every row from index `limitTest` onwards.
            deleteLine: List[int] = [i for i in range(limitTest, data.shape[0])]
        else:
            inputCheckIterableInRange(value=limitTest, name=name, maxValue=data.shape[0])
            # Window: drop rows before the start and at/after the stop.
            deleteLine: List[int] = [i for i in range(0, min(limitTest))]
            for val in range(max(limitTest), data.shape[0]):
                deleteLine.append(val)
    else:
        # Explicit index list: keep those rows, drop the complement.
        limitTest.sort()
        deleteLine: List[int] = GetRemainingIndexFromLimit(indexArray=limitTest, maximumValue=data.shape[0])

    if isinstance(data, pd.DataFrame):
        return data.drop(labels=deleteLine, inplace=False)
    return np.delete(data, obj=deleteLine, axis=0)
class _BaseGenerator(DatasetAPI):
""" Python API for AIP-BDET model: Integrated with PyCharm & RDKit """
# Constant or Count or Counter
_MACCS_Length, _ErG_Length, _legacyStereoChemistry = 167, 315, True # dFramework["Legacy"]
_FixedBondTypeCache, _nonConnectedAtom = dFramework["Bond Type Cache"], dFramework["Non-Connected Atoms"]
_FixedBondNotationCache: Tuple = ('-', '=', '#')
_hydrogenSmiles: str = "[H]"
callWarning: bool = False
FingerprintTemplateLabels: Optional[Union[List[str], Tuple[str, ...]]] = None
TemplateLabels: Optional[Union[List[str], Tuple[str, ...]]] = None
def __init__(self, DatasetObject: FeatureData = None, priorityKey: Optional[str] = "Auto",
boostMode: bool = True, simplifySmilesEnvironment: bool = True,
showInvalidStereochemistry: bool = False, environmentCatching: bool = False):
print("-" * 33, "INITIALIZATION", "-" * 33)
# [1]: Dataset Pointer
super(_BaseGenerator, self).__init__(dataset=DatasetObject, priority_key=priorityKey)
# [1.1]: Data Position ---> Cache Purpose Only
self.dataType: np.dtype = self._dataset.dataType
self._mol, self._radicals, self._bondIdx, self._bondType = self._dataset.getPropertiesRespectToColumn()
self.InputKey: int = self._dataset.InputKey
# Controlling-Function Wrapper
template_: Tuple[Callable, Callable] = (self._AtomBondEncoding, self._fillDataType2)
iDict: Dict[int, Tuple[Union[int, Callable], Callable]] = \
{
1: (_getEmptyList_, self._fillDataType1), 2: (_getEmptyList_, self._fillDataType2),
3: (_getEmptyList_, self._fillDataType3), 4: (_getEmptyList_, self._fillDataType2),
5: template_, 5001: template_,
}
_boostCalling: Dict[Callable, Optional[Callable]] = \
{iDict[1][1]: None, template_[1]: self._fillDataType2Fast, iDict[3][1]: None}
self._AtomBondHashing_, self._FunctionWrapper_ = iDict[self.InputKey]
# [2]: Check-up Features
# [2.1.a]: Generate Fingerprint Configuration
if True:
data = dFramework
self._Weight, self._Descriptors = data["Weight"], data["Descriptors"]
self._cisTrans, self._countCisTrans = data["Cis-Trans Encoding"], data["Cis-Trans Counting"]
self._LargeRadius, self._SmallRadius = data["Large Environment"], data["Small Environment"]
self._ECFP_Radius, self._ECFP_nBits = data["ECFP_Radius"], data["ECFP_nBits"]
self._FCFP_Radius, self._FCFP_nBits = data["FCFP_Radius"], data["FCFP_nBits"]
self._MHECFP_Radius, self._MHECFP_nBits = data["MHECFP_Radius"], data["MHECFP_nBits"]
self._MHFCFP_Radius, self._MHFCFP_nBits = data["MHFCFP_Radius"], data["MHFCFP_nBits"]
self._SECFP_Radius, self._SECFP_nBits = data["SECFP_Radius"], data["SECFP_nBits"]
self._SECFP_Seed, self._SECFP_Info = data["SECFP_Seed"], data["SECFP_Info"]
self._RDKit_maxPath, self._RDKit_nBits = data["RDKit_maxPath"], data["RDKit_nBits"]
self._Layered_maxPath, self._Layered_nBits = data["LayeredRDKit_maxPath"], data["LayeredRDKit_nBits"]
self._Torsion_nBits, self._Torsion_Size = data["Torsion_nBits"], data["Torsion_Size"]
self._Pattern_nBits, self._Pattern_Tautomer = data["Pattern_nBits"], data["Pattern_Tautomer"]
self._AtomPairs_nBits = data["AtomPairs_nBits"]
self._Avalon_nBits, self._MACCS = data["Avalon_nBits"], data["MACCS"]
self._ErG, self._ErG_fuzz, self._ErG_maxPath = data["ErG"], data["ErG_fuzzIncrement"], data["ErG_maxPath"]
self._handleConfiguration(numsInput=iFramework[self._dataset.InputKey][0])
self._SECFP_Encode = MHFPEncoder(self._SECFP_Seed).EncodeSECFPMol if self._SECFP_nBits != 0 else None
# [2.1.b]: Cache support for create molecule/environment's descriptors
self._AtomicNumberCache: Dict[int, int] = {value: key for key, value in enumerate(data["Atom Symbol"][1])}
self._NonAromaticBond: Dict[str, int] = {value: key for key, value in enumerate(data["Non-Aro Bond"])}
self._OuterAroBond: Dict[str, int] = {value: key for key, value in enumerate(data["Outer-Ring Bond"])}
self._InnerAroBond: Dict[str, int] = {value: key for key, value in enumerate(data["Inner-Ring Bond"])}
# [2.1.c]: Additional | Extra Features
self.CounterVectorLabels: List[str] = [] # How to represent atom-neighbor-bond information (counter)
if self._AtomBondHashing_ != _getEmptyList_:
# [1]: Constructing base information
Extra_1: List[str] = ["C", "H", "O", "N"] # Atom Symbol
Extra_2: List[str] = ["OTHER", "SP", "SP2", "SP3"] # RDKit Hybridization
Extra_3: List[str] = ["Aromatic", "Ring"] # Other Atomic Features
Extra_4: List[str] = ["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC"] # Neighboring-Bond in LargeEnv/Mol
Extra_5: List[str] = ['Ring-Attached']
# self.Extra_5: List[str] = ["CHI_OTHER", "CHI_TETRAHEDRAL_CCW", "CHI_TETRAHEDRAL_CW"] Chiral Tag
# [2]: Build Atom-Neighbor-Bond Information
# (i): Label Features at self.CounterVectorLabels
# (ii): Filter out same operation at self.tokenDict (atom-neighbor: counting ~ Tuple)
# (iii): Locate the location of self.tokenPosition. First value is constraint between
# atom-neighbor: counting & bond counting and identifier
# [2.1]: Initialization
extraOfAtomNeighbor: List[str] = Extra_1 + Extra_2 + Extra_3
tokenAtomicSize: int = len(extraOfAtomNeighbor) # Extra_1, Extra_2, Extra_3 = Step for Neighbor
# Ordering: Associated Atoms --> Neighbor Atoms --> Target Bonds
eachExtraSize: int = 0
self.tokenDict: List[Dict[str, Union[int, Tuple[int, int]]]] = []
self.tokenPosition: Union[List[Union[int, Tuple[int, int]]], Tuple] = [tokenAtomicSize]
# [2.2]: Main operation
# [2.2.1]: Atom-Neighbor Features
for i in range(0, 2):
name: str = 'Target - ' if i == 0 else 'Neighbor - '
for k, value in enumerate(extraOfAtomNeighbor):
self.CounterVectorLabels.append(f"{name}{value}")
for extra in (Extra_1, Extra_2, Extra_3):
self.tokenDict.append({})
for idx, key in enumerate(extra):
self.tokenDict[-1][key] = (eachExtraSize + idx, eachExtraSize + self.tokenPosition[0] + idx)
eachExtraSize += len(extra)
self.tokenPosition.append((eachExtraSize, eachExtraSize + self.tokenPosition[0]))
eachExtraSize += tokenAtomicSize
if eachExtraSize != 2 * tokenAtomicSize:
raise ValueError("Error Source Code")
# [2.2.2]: Bond Features
self.tokenDict.append({})
for idx, key in enumerate(Extra_4):
self.CounterVectorLabels.append(key)
self.tokenDict[-1][key] = eachExtraSize + idx
eachExtraSize += len(Extra_4)
self.tokenPosition.append(eachExtraSize)
if dFramework["Accept Ring-Attached Bond Record"] is True:
self.tokenDict.append({})
for idx, key in enumerate(Extra_5):
self.CounterVectorLabels.append(key)
self.tokenDict[-1][key] = eachExtraSize + idx
eachExtraSize += len(Extra_5)
self.tokenPosition.append(eachExtraSize)
self.tokenDict: Tuple = tuple(self.tokenDict)
self.tokenPosition: Tuple = tuple(self.tokenPosition)
pass
self.CisTransLabels: List[str] = ["Stereo Chemistry"] # E: Trans --- Z: Cis
if self._countCisTrans:
self.CisTransLabels.append("Count Z-Bonds")
self.CisTransLabels.append("Count E-Bonds")
if self._cisTrans:
# ["Cis Bond", "Cis Atom (C)", "Cis Atom (N)", "Trans Bond", "Trans Atom (C)", "Trans Atom (N)"]
for value_mode in ("Cis", "Trans"):
self.CisTransLabels.append(f"{value_mode} Bond")
for atom in dFramework["Cis-Trans Atoms"]:
self.CisTransLabels.append(f"{value_mode} Atom ({atom})")
pass
self._UniqueBondTypeList: List[str] = []
# [2.1.d]: Hydrogen Cache & Boost Activation
self._hydrogen: Optional[Union[ndarray, str]] = None
self._binaryMode: bool = True
self._boostMode: bool = False
if True in (self._Weight[0], self._Weight[1], self._Descriptors, self._ErG):
self._binaryMode: bool = False
raise NotImplementedError("Weight, Descriptors, or ErG Fingerprints are not available to use")
if boostMode and _boostCalling[self._FunctionWrapper_] is not None and self._binaryMode:
self._boostMode: bool = True
self._FunctionWrapper_: Callable = _boostCalling[self._FunctionWrapper_]
if environmentCatching:
warning(f" Bit2Edge disable environmentCatching")
environmentCatching = False
if showInvalidStereochemistry:
warning(f" Bit2Edge disable showInvalidStereochemistry")
showInvalidStereochemistry = False
else:
warning(f" self._boostMode={self._boostMode} is not activated")
# [2.2]: Detailed Fingerprint's Generator:
if True:
_Weight_, _ExactWeight_, _Descriptors_ = self._Weight_, self._ExactWeight_, self._Descriptors_
_ECFP_, _FCFP_, _MHECFP_, _MHFCFP_ = self._ECFP_, self._FCFP_, self._MHECFP_, self._MHFCFP_
_SECFP_, _RDK_Build, _Layered_, _AtomPair_ = self._SECFP_, self._RDK_, self._Layered_, self._AtomPair_
_TTorsion, _Avalon_, _Pattern_, _MACCS_, _ErG_ = self._TTorsion_, self._Avalon_, self._Pattern_, \
self._MACCS_, self._ErG_
Status: Tuple = \
(self._Weight[0], self._Weight[1], self._Descriptors, self._ErG, self._ECFP_nBits, self._FCFP_nBits,
self._MHECFP_nBits, self._MHFCFP_nBits, self._SECFP_nBits, self._RDKit_nBits, self._Layered_nBits,
self._AtomPairs_nBits, self._Torsion_nBits, self._Avalon_nBits, self._Pattern_nBits, self._MACCS)
LinkedFunction: Tuple = \
(_Weight_, _ExactWeight_, _Descriptors_, _ErG_, _ECFP_, _FCFP_, _MHECFP_, _MHFCFP_, _SECFP_,
_RDK_Build, _Layered_, _AtomPair_, _TTorsion, _Avalon_, _Pattern_, _MACCS_)
# Whether to create or return bit vector
LinkedFunctionCache: Tuple = \
(False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True)
UpdatedFunction: Tuple = \
(1, 1, len(data["Atom Symbol"][1]) * 2 + 18 + len(data["Non-Aro Bond"]) +
len(data["Outer-Ring Bond"]) + len(data["Inner-Ring Bond"]) + 1, self._ErG_Length,
self._ECFP_nBits, self._FCFP_nBits, self._MHECFP_nBits, self._MHFCFP_nBits, self._SECFP_nBits,
self._RDKit_nBits, self._Layered_nBits, self._AtomPairs_nBits, self._Torsion_nBits, self._Avalon_nBits,
self._Pattern_nBits, self._MACCS_Length,)
cache: List[int] = [len(Status), len(LinkedFunction), len(UpdatedFunction), len(LinkedFunctionCache)]
if len(list(set(cache))) != 1:
raise ValueError(f"There are something wrong with {LinkedFunction} or {Status} or {cache}")
featureSize: int = 0
self._FeatureLocation: List[List[int], List[int]] = [[], []]
self._FeatureGenerator_: Union[List[Callable], Tuple] = []
self._BitVectFingerprintParserIndex: int = 0
for index in range(0, len(Status)):
if int(Status[index]) != 0:
self._FeatureGenerator_.append(LinkedFunction[index])
self._FeatureLocation[0].append(featureSize)
featureSize += UpdatedFunction[index]
self._FeatureLocation[1].append(featureSize)
if not LinkedFunctionCache[index]:
self._BitVectFingerprintParserIndex += 1
pass
self._FeatureGenerator_ = tuple(self._FeatureGenerator_)
self._FastGenerate_: Callable = self._GenerateDataWithPartialBitType if \
self._BitVectFingerprintParserIndex != 0 else self._GenerateDataWithFullBitType
# [3]: Derived Attribute --> Build when getFeatureLabels()
# [3.1]: Info-Feature Data Cache -- Non-Primitive
self.indexData: Optional[List[Tuple[int, str]]] = None
self._bondIdxCacheList: Optional[List[int]] = None
self._KekulizedParentMol: Mol = None
self._SanitizedParentMol: Mol = None
self._InfoData: Optional[ndarray] = None
self._FeatureData: Optional[ndarray] = None
self._Environment: Optional[ndarray] = None
self.StartPosition: Optional[Tuple[int, ...]] = None
self.EndPosition: Optional[Tuple[int, ...]] = None
# [3.2]: Feature Data Cache -- Primitive Cache
self.ExtraSize: int = 0
self._invertedExtraSize: int = 0
self.numsUniqueBondType: int = 0
# [4.1]: Timing Attribute
self._creationBDE, self._conversionBDE = 0, 0
self._creationTiming, | |
randomize()
instance.enabled = randomize("bool")
instance.end_date = randomize("date")
instance.targeted_namespace = randomize("slug")
instance.disabled_date = randomize("date")
return instance
def create_accountcommon_list_users_with_platform_accounts_response_example() -> AccountcommonListUsersWithPlatformAccountsResponse:
    """Builds an AccountcommonListUsersWithPlatformAccountsResponse with random example data."""
    example = AccountcommonListUsersWithPlatformAccountsResponse()
    example.data = [create_accountcommon_user_with_platform_accounts_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    example.total_data = randomize("int", min_val=1, max_val=1000)
    return example
def create_accountcommon_namespace_role_example() -> AccountcommonNamespaceRole:
    """Builds an AccountcommonNamespaceRole with random example data."""
    example = AccountcommonNamespaceRole()
    example.namespace = randomize("slug")
    example.role_id = randomize("uid")
    return example
def create_accountcommon_netflix_certificates_example() -> AccountcommonNetflixCertificates:
    """Builds an AccountcommonNetflixCertificates with random example data."""
    example = AccountcommonNetflixCertificates()
    example.encrypted_private_key = randomize()
    example.public_certificate = randomize()
    example.root_certificate = randomize()
    return example
def create_accountcommon_pagination_example() -> AccountcommonPagination:
    """Builds an AccountcommonPagination with random example data."""
    example = AccountcommonPagination()
    example.first = randomize()
    example.last = randomize()
    example.next_ = randomize()
    example.previous = randomize()
    return example
def create_accountcommon_pagination_v3_example() -> AccountcommonPaginationV3:
instance = AccountcommonPaginationV3()
instance.first = randomize()
instance.last = randomize()
instance.next_ = randomize()
instance.previous = randomize()
return instance
def create_accountcommon_permission_example() -> AccountcommonPermission:
instance = AccountcommonPermission()
instance.action = randomize("int", min_val=1, max_val=1000)
instance.resource = randomize()
instance.sched_action = randomize("int", min_val=1, max_val=1000)
instance.sched_cron = randomize()
instance.sched_range = [randomize()]
return instance
def create_accountcommon_permission_v3_example() -> AccountcommonPermissionV3:
instance = AccountcommonPermissionV3()
instance.action = randomize("int", min_val=1, max_val=1000)
instance.resource = randomize()
instance.sched_action = randomize("int", min_val=1, max_val=1000)
instance.sched_cron = randomize()
instance.sched_range = [randomize()]
return instance
def create_accountcommon_permissions_example() -> AccountcommonPermissions:
instance = AccountcommonPermissions()
instance.permissions = [create_accountcommon_permission_example()]
return instance
def create_accountcommon_permissions_v3_example() -> AccountcommonPermissionsV3:
instance = AccountcommonPermissionsV3()
instance.permissions = [create_accountcommon_permission_v3_example()]
return instance
def create_accountcommon_platform_account_example() -> AccountcommonPlatformAccount:
instance = AccountcommonPlatformAccount()
instance.namespace = randomize("slug")
instance.platform_user_id = randomize()
return instance
def create_accountcommon_registered_domain_example() -> AccountcommonRegisteredDomain:
    """Return an AccountcommonRegisteredDomain filled with randomized example data."""
    model = AccountcommonRegisteredDomain()
    model.affected_client_i_ds = [randomize()]
    model.domain = randomize()
    model.namespaces = [randomize()]
    model.role_id = randomize("uid")
    return model


def create_accountcommon_role_example() -> AccountcommonRole:
    """Return an AccountcommonRole filled with randomized example data."""
    model = AccountcommonRole()
    model.admin_role = randomize("bool")
    model.deletable = randomize("bool")
    model.is_wildcard = randomize("bool")
    model.managers = [create_accountcommon_role_manager_example()]
    model.members = [create_accountcommon_role_member_example()]
    model.permissions = [create_accountcommon_permission_example()]
    model.role_id = randomize("uid")
    model.role_name = randomize()
    return model


def create_accountcommon_role_manager_example() -> AccountcommonRoleManager:
    """Return an AccountcommonRoleManager filled with randomized example data."""
    model = AccountcommonRoleManager()
    model.display_name = randomize("slug")
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_accountcommon_role_manager_v3_example() -> AccountcommonRoleManagerV3:
    """Return an AccountcommonRoleManagerV3 filled with randomized example data."""
    model = AccountcommonRoleManagerV3()
    model.display_name = randomize("slug")
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_accountcommon_role_member_example() -> AccountcommonRoleMember:
    """Return an AccountcommonRoleMember filled with randomized example data."""
    model = AccountcommonRoleMember()
    model.display_name = randomize("slug")
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_accountcommon_role_member_v3_example() -> AccountcommonRoleMemberV3:
    """Return an AccountcommonRoleMemberV3 filled with randomized example data."""
    model = AccountcommonRoleMemberV3()
    model.display_name = randomize("slug")
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_accountcommon_role_v3_example() -> AccountcommonRoleV3:
    """Return an AccountcommonRoleV3 filled with randomized example data."""
    model = AccountcommonRoleV3()
    model.admin_role = randomize("bool")
    model.is_wildcard = randomize("bool")
    model.managers = [create_accountcommon_role_manager_v3_example()]
    model.members = [create_accountcommon_role_member_v3_example()]
    model.permissions = [create_accountcommon_permission_v3_example()]
    model.role_id = randomize("uid")
    model.role_name = randomize()
    return model


def create_accountcommon_simple_user_platform_info_v3_example() -> AccountcommonSimpleUserPlatformInfoV3:
    """Return an AccountcommonSimpleUserPlatformInfoV3 filled with randomized example data."""
    model = AccountcommonSimpleUserPlatformInfoV3()
    model.linked_at = randomize()
    model.namespace = randomize("slug")
    model.origin_namespace = randomize("slug")
    model.display_name = randomize("slug")
    model.platform_id = randomize()
    return model
def create_accountcommon_user_linked_platform_example() -> AccountcommonUserLinkedPlatform:
    """Return an AccountcommonUserLinkedPlatform filled with randomized example data."""
    model = AccountcommonUserLinkedPlatform()
    model.linked_at = randomize()
    model.namespace = randomize("slug")
    model.origin_namespace = randomize("slug")
    model.user_id = randomize("uid")
    model.display_name = randomize("slug")
    model.email_address = randomize("email")
    model.platform_id = randomize()
    model.platform_user_id = randomize()
    model.xuid = randomize()
    return model


def create_accountcommon_user_linked_platform_v3_example() -> AccountcommonUserLinkedPlatformV3:
    """Return an AccountcommonUserLinkedPlatformV3 filled with randomized example data."""
    model = AccountcommonUserLinkedPlatformV3()
    model.account_group = randomize()
    model.linked_at = randomize()
    model.namespace = randomize("slug")
    model.origin_namespace = randomize("slug")
    model.user_id = randomize("uid")
    model.display_name = randomize("slug")
    model.email_address = randomize("email")
    model.platform_id = randomize()
    model.platform_user_id = randomize()
    return model


def create_accountcommon_user_linked_platforms_response_v3_example() -> AccountcommonUserLinkedPlatformsResponseV3:
    """Return an AccountcommonUserLinkedPlatformsResponseV3 filled with randomized example data."""
    model = AccountcommonUserLinkedPlatformsResponseV3()
    model.data = [create_accountcommon_user_linked_platform_v3_example()]
    model.paging = create_accountcommon_pagination_v3_example()
    return model


def create_accountcommon_user_platform_info_example() -> AccountcommonUserPlatformInfo:
    """Return an AccountcommonUserPlatformInfo filled with randomized example data."""
    model = AccountcommonUserPlatformInfo()
    model.platform_id = randomize()
    model.platform_user_id = randomize()
    model.user_id = randomize("uid")
    return model


def create_accountcommon_user_platforms_example() -> AccountcommonUserPlatforms:
    """Return an AccountcommonUserPlatforms filled with randomized example data."""
    model = AccountcommonUserPlatforms()
    model.user_id_platforms = [create_accountcommon_user_platform_info_example()]
    return model


def create_accountcommon_user_search_by_platform_id_result_example() -> AccountcommonUserSearchByPlatformIDResult:
    """Return an AccountcommonUserSearchByPlatformIDResult filled with randomized example data."""
    model = AccountcommonUserSearchByPlatformIDResult()
    model.display_name = randomize("slug")
    model.email_address = randomize("email")
    model.linked_platforms = [create_accountcommon_user_linked_platform_example()]
    model.phone_number = randomize()
    model.user_id = randomize("uid")
    return model


def create_accountcommon_user_search_result_example() -> AccountcommonUserSearchResult:
    """Return an AccountcommonUserSearchResult filled with randomized example data."""
    model = AccountcommonUserSearchResult()
    model.display_name = randomize("slug")
    model.email_address = randomize("email")
    model.linked_platforms = [create_accountcommon_user_linked_platform_example()]
    model.phone_number = randomize()
    model.user_id = randomize("uid")
    return model


def create_accountcommon_user_with_linked_platform_accounts_example() -> AccountcommonUserWithLinkedPlatformAccounts:
    """Return an AccountcommonUserWithLinkedPlatformAccounts filled with randomized example data."""
    model = AccountcommonUserWithLinkedPlatformAccounts()
    model.display_name = randomize("slug")
    model.email_address = randomize("email")
    model.linked_platforms = [create_accountcommon_platform_account_example()]
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_accountcommon_user_with_platform_accounts_example() -> AccountcommonUserWithPlatformAccounts:
    """Return an AccountcommonUserWithPlatformAccounts filled with randomized example data."""
    model = AccountcommonUserWithPlatformAccounts()
    model.linked_platforms = [create_accountcommon_platform_account_example()]
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model
def create_banned_by_example() -> BannedBy:
    """Return a BannedBy filled with randomized example data."""
    model = BannedBy()
    model.display_name = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_bloom_filter_json_example() -> BloomFilterJSON:
    """Return a BloomFilterJSON filled with randomized example data."""
    model = BloomFilterJSON()
    model.bits = [randomize("int", min_val=1, max_val=1000)]
    model.k = randomize("int", min_val=1, max_val=1000)
    model.m = randomize("int", min_val=1, max_val=1000)
    return model


def create_clientmodel_client_create_request_example() -> ClientmodelClientCreateRequest:
    """Return a ClientmodelClientCreateRequest filled with randomized example data."""
    model = ClientmodelClientCreateRequest()
    model.client_id = randomize("uid")
    model.client_name = randomize()
    model.client_permissions = [create_accountcommon_permission_example()]
    model.namespace = randomize("slug")
    model.redirect_uri = randomize()
    model.secret = randomize()
    return model


def create_clientmodel_client_creation_response_example() -> ClientmodelClientCreationResponse:
    """Return a ClientmodelClientCreationResponse filled with randomized example data."""
    model = ClientmodelClientCreationResponse()
    model.client_id = randomize("uid")
    model.client_name = randomize()
    model.client_permissions = [create_accountcommon_permission_example()]
    model.namespace = randomize("slug")
    model.redirect_uri = randomize()
    return model


def create_clientmodel_client_creation_v3_request_example() -> ClientmodelClientCreationV3Request:
    """Return a ClientmodelClientCreationV3Request filled with randomized example data."""
    model = ClientmodelClientCreationV3Request()
    model.audiences = [randomize()]
    model.base_uri = randomize()
    model.client_id = randomize("uid")
    model.client_name = randomize()
    model.client_permissions = [create_accountcommon_permission_v3_example()]
    model.client_platform = randomize()
    model.namespace = randomize("slug")
    model.oauth_client_type = randomize()
    model.redirect_uri = randomize()
    model.secret = randomize()
    model.deletable = randomize("bool")
    return model


def create_clientmodel_client_response_example() -> ClientmodelClientResponse:
    """Return a ClientmodelClientResponse filled with randomized example data."""
    model = ClientmodelClientResponse()
    model.client_id = randomize("uid")
    model.client_name = randomize()
    model.client_permissions = [create_accountcommon_permission_example()]
    model.created_at = randomize("date")
    model.namespace = randomize("slug")
    model.redirect_uri = randomize()
    return model


def create_clientmodel_client_update_request_example() -> ClientmodelClientUpdateRequest:
    """Return a ClientmodelClientUpdateRequest filled with randomized example data."""
    model = ClientmodelClientUpdateRequest()
    model.client_name = randomize()
    model.redirect_uri = randomize()
    return model


def create_clientmodel_client_update_secret_request_example() -> ClientmodelClientUpdateSecretRequest:
    """Return a ClientmodelClientUpdateSecretRequest filled with randomized example data."""
    model = ClientmodelClientUpdateSecretRequest()
    model.new_secret = randomize()
    return model


def create_clientmodel_client_update_v3_request_example() -> ClientmodelClientUpdateV3Request:
    """Return a ClientmodelClientUpdateV3Request filled with randomized example data."""
    model = ClientmodelClientUpdateV3Request()
    model.client_platform = randomize()
    model.audiences = [randomize()]
    model.base_uri = randomize()
    model.client_name = randomize()
    model.client_permissions = [create_accountcommon_permission_v3_example()]
    model.deletable = randomize("bool")
    model.namespace = randomize("slug")
    model.redirect_uri = randomize()
    return model


def create_clientmodel_client_v3_response_example() -> ClientmodelClientV3Response:
    """Return a ClientmodelClientV3Response filled with randomized example data."""
    model = ClientmodelClientV3Response()
    model.audiences = [randomize()]
    model.base_uri = randomize()
    model.client_id = randomize("uid")
    model.client_name = randomize()
    model.client_permissions = [create_accountcommon_permission_v3_example()]
    model.client_platform = randomize()
    model.created_at = randomize("date")
    model.modified_at = randomize("date")
    model.namespace = randomize("slug")
    model.oauth_client_type = randomize()
    model.redirect_uri = randomize()
    model.scopes = [randomize()]
    return model


def create_clientmodel_clients_v3_response_example() -> ClientmodelClientsV3Response:
    """Return a ClientmodelClientsV3Response filled with randomized example data."""
    model = ClientmodelClientsV3Response()
    model.data = [create_clientmodel_client_v3_response_example()]
    model.paging = create_accountcommon_pagination_v3_example()
    return model
def create_legal_accepted_policies_request_example() -> LegalAcceptedPoliciesRequest:
    """Return a LegalAcceptedPoliciesRequest filled with randomized example data."""
    model = LegalAcceptedPoliciesRequest()
    model.is_accepted = randomize("bool")
    model.localized_policy_version_id = randomize()
    model.policy_id = randomize()
    model.policy_version_id = randomize()
    return model


def create_model_add_user_role_v4_request_example() -> ModelAddUserRoleV4Request:
    """Return a ModelAddUserRoleV4Request filled with randomized example data."""
    model = ModelAddUserRoleV4Request()
    model.assigned_namespaces = [randomize()]
    model.role_id = randomize("uid")
    return model


def create_model_age_restriction_request_example() -> ModelAgeRestrictionRequest:
    """Return a ModelAgeRestrictionRequest filled with randomized example data."""
    model = ModelAgeRestrictionRequest()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    model.enable = randomize("bool")
    return model


def create_model_age_restriction_request_v3_example() -> ModelAgeRestrictionRequestV3:
    """Return a ModelAgeRestrictionRequestV3 filled with randomized example data."""
    model = ModelAgeRestrictionRequestV3()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    model.enable = randomize("bool")
    return model


def create_model_age_restriction_response_example() -> ModelAgeRestrictionResponse:
    """Return a ModelAgeRestrictionResponse filled with randomized example data."""
    model = ModelAgeRestrictionResponse()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    model.enable = randomize("bool")
    return model


def create_model_age_restriction_response_v3_example() -> ModelAgeRestrictionResponseV3:
    """Return a ModelAgeRestrictionResponseV3 filled with randomized example data."""
    model = ModelAgeRestrictionResponseV3()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    model.enable = randomize("bool")
    return model


def create_model_assign_user_v4_request_example() -> ModelAssignUserV4Request:
    """Return a ModelAssignUserV4Request filled with randomized example data."""
    model = ModelAssignUserV4Request()
    model.assigned_namespaces = [randomize()]
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_model_assigned_user_v4_response_example() -> ModelAssignedUserV4Response:
    """Return a ModelAssignedUserV4Response filled with randomized example data."""
    model = ModelAssignedUserV4Response()
    model.assigned_namespaces = [randomize()]
    model.display_name = randomize("slug")
    model.email = randomize("email")
    model.role_id = randomize("uid")
    model.user_id = randomize("uid")
    return model


def create_model_authenticator_key_response_v4_example() -> ModelAuthenticatorKeyResponseV4:
    """Return a ModelAuthenticatorKeyResponseV4 filled with randomized example data."""
    model = ModelAuthenticatorKeyResponseV4()
    model.secret_key = randomize()
    model.uri = randomize()
    return model


def create_model_backup_codes_response_v4_example() -> ModelBackupCodesResponseV4:
    """Return a ModelBackupCodesResponseV4 filled with randomized example data."""
    model = ModelBackupCodesResponseV4()
    model.generated_at = randomize("int", min_val=1, max_val=1000)
    model.invalid_codes = [randomize()]
    model.valid_codes = [randomize()]
    return model


def create_model_ban_create_request_example() -> ModelBanCreateRequest:
    """Return a ModelBanCreateRequest filled with randomized example data."""
    model = ModelBanCreateRequest()
    model.ban = randomize()
    model.comment = randomize()
    model.end_date = randomize()
    model.reason = randomize()
    model.skip_notif = randomize("bool")
    return model


def create_model_ban_update_request_example() -> ModelBanUpdateRequest:
    """Return a ModelBanUpdateRequest filled with randomized example data."""
    model = ModelBanUpdateRequest()
    model.enabled = randomize("bool")
    model.skip_notif = randomize("bool")
    return model
def create_model_check_valid_user_id_request_v4_example() -> ModelCheckValidUserIDRequestV4:
    """Return a ModelCheckValidUserIDRequestV4 filled with randomized example data."""
    model = ModelCheckValidUserIDRequestV4()
    model.user_ids = [randomize()]
    return model


def create_model_country_example() -> ModelCountry:
    """Return a ModelCountry filled with randomized example data."""
    model = ModelCountry()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    model.country_code = randomize()
    model.country_name = randomize()
    model.enable = randomize("bool")
    return model


def create_model_country_age_restriction_request_example() -> ModelCountryAgeRestrictionRequest:
    """Return a ModelCountryAgeRestrictionRequest filled with randomized example data."""
    model = ModelCountryAgeRestrictionRequest()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    return model


def create_model_country_age_restriction_v3_request_example() -> ModelCountryAgeRestrictionV3Request:
    """Return a ModelCountryAgeRestrictionV3Request filled with randomized example data."""
    model = ModelCountryAgeRestrictionV3Request()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    return model


def create_model_country_v3_response_example() -> ModelCountryV3Response:
    """Return a ModelCountryV3Response filled with randomized example data."""
    model = ModelCountryV3Response()
    model.age_restriction = randomize("int", min_val=1, max_val=1000)
    model.country_code = randomize()
    model.country_name = randomize()
    model.enable = randomize("bool")
    return model


def create_model_create_justice_user_response_example() -> ModelCreateJusticeUserResponse:
    """Return a ModelCreateJusticeUserResponse filled with randomized example data."""
    model = ModelCreateJusticeUserResponse()
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_model_disable_user_request_example() -> ModelDisableUserRequest:
    """Return a ModelDisableUserRequest filled with randomized example data."""
    model = ModelDisableUserRequest()
    model.reason = randomize()
    return model


def create_model_email_update_request_v4_example() -> ModelEmailUpdateRequestV4:
    """Return a ModelEmailUpdateRequestV4 filled with randomized example data."""
    model = ModelEmailUpdateRequestV4()
    model.code = randomize()
    model.email_address = randomize("email")
    return model


def create_model_enabled_factors_response_v4_example() -> ModelEnabledFactorsResponseV4:
    """Return a ModelEnabledFactorsResponseV4 filled with randomized example data."""
    model = ModelEnabledFactorsResponseV4()
    model.default = randomize()
    model.enabled = [randomize()]
    return model


def create_model_forgot_password_request_v3_example() -> ModelForgotPasswordRequestV3:
    """Return a ModelForgotPasswordRequestV3 filled with randomized example data."""
    model = ModelForgotPasswordRequestV3()
    model.email_address = randomize("email")
    model.language_tag = randomize()
    return model


def create_model_get_admin_users_response_example() -> ModelGetAdminUsersResponse:
    """Return a ModelGetAdminUsersResponse filled with randomized example data."""
    model = ModelGetAdminUsersResponse()
    model.data = [create_model_user_response_example()]
    model.paging = create_accountcommon_pagination_example()
    return model


def create_model_get_publisher_user_response_example() -> ModelGetPublisherUserResponse:
    """Return a ModelGetPublisherUserResponse filled with randomized example data."""
    model = ModelGetPublisherUserResponse()
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_model_get_user_ban_v3_response_example() -> ModelGetUserBanV3Response:
    """Return a ModelGetUserBanV3Response filled with randomized example data."""
    model = ModelGetUserBanV3Response()
    model.data = [create_model_user_ban_response_v3_example()]
    model.paging = create_accountcommon_pagination_v3_example()
    return model


def create_model_get_user_justice_platform_account_response_example() -> ModelGetUserJusticePlatformAccountResponse:
    """Return a ModelGetUserJusticePlatformAccountResponse filled with randomized example data."""
    model = ModelGetUserJusticePlatformAccountResponse()
    model.designated_namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_model_get_user_mapping_example() -> ModelGetUserMapping:
    """Return a ModelGetUserMapping filled with randomized example data."""
    model = ModelGetUserMapping()
    model.namespace = randomize("slug")
    model.user_id = randomize("uid")
    return model


def create_model_get_users_response_with_pagination_v3_example() -> ModelGetUsersResponseWithPaginationV3:
    """Return a ModelGetUsersResponseWithPaginationV3 filled with randomized example data."""
    model = ModelGetUsersResponseWithPaginationV3()
    model.data = [create_model_user_response_v3_example()]
    model.paging = create_accountcommon_pagination_v3_example()
    return model
def create_model_input_validation_data_example() -> ModelInputValidationData:
instance = ModelInputValidationData()
instance.field | |
None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class GetCreateEcsImageProgressAction(inventory.APIGetCreateEcsImageProgressMsg):
    """Test-harness wrapper around APIGetCreateEcsImageProgressMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetCreateEcsImageProgressAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCreateEcsImageProgressAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetCurrentTimeAction(inventory.APIGetCurrentTimeMsg):
    """Test-harness wrapper around APIGetCurrentTimeMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetCurrentTimeAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCurrentTimeAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetDataCenterFromRemoteAction(inventory.APIGetDataCenterFromRemoteMsg):
    """Test-harness wrapper around APIGetDataCenterFromRemoteMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetDataCenterFromRemoteAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetDataCenterFromRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetDataVolumeAttachableVmAction(inventory.APIGetDataVolumeAttachableVmMsg):
    """Test-harness wrapper around APIGetDataVolumeAttachableVmMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetDataVolumeAttachableVmAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetDataVolumeAttachableVmAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetEcsInstanceTypeAction(inventory.APIGetEcsInstanceTypeMsg):
    """Test-harness wrapper around APIGetEcsInstanceTypeMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetEcsInstanceTypeAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetEcsInstanceTypeAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetEcsInstanceVncUrlAction(inventory.APIGetEcsInstanceVncUrlMsg):
    """Test-harness wrapper around APIGetEcsInstanceVncUrlMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetEcsInstanceVncUrlAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetEcsInstanceVncUrlAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetEipAttachableVmNicsAction(inventory.APIGetEipAttachableVmNicsMsg):
    """Test-harness wrapper around APIGetEipAttachableVmNicsMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetEipAttachableVmNicsAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetEipAttachableVmNicsAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetEventDataAction(inventory.APIGetEventDataMsg):
    """Test-harness wrapper around APIGetEventDataMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetEventDataAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetEventDataAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetFreeIpAction(inventory.APIGetFreeIpMsg):
    """Test-harness wrapper around APIGetFreeIpMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetFreeIpAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetFreeIpAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetGlobalConfigAction(inventory.APIGetGlobalConfigMsg):
    """Test-harness wrapper around APIGetGlobalConfigMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetGlobalConfigAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetGlobalConfigAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetGlobalPropertyAction(inventory.APIGetGlobalPropertyMsg):
    """Test-harness wrapper around APIGetGlobalPropertyMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetGlobalPropertyAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetGlobalPropertyAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetHostAllocatorStrategiesAction(inventory.APIGetHostAllocatorStrategiesMsg):
    """Test-harness wrapper around APIGetHostAllocatorStrategiesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetHostAllocatorStrategiesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetHostAllocatorStrategiesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetHostIommuStateAction(inventory.APIGetHostIommuStateMsg):
    """Test-harness wrapper around APIGetHostIommuStateMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetHostIommuStateAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetHostIommuStateAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetHostIommuStatusAction(inventory.APIGetHostIommuStatusMsg):
    """Test-harness wrapper around APIGetHostIommuStatusMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetHostIommuStatusAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetHostIommuStatusAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetHypervisorTypesAction(inventory.APIGetHypervisorTypesMsg):
    """Test-harness wrapper around APIGetHypervisorTypesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetHypervisorTypesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetHypervisorTypesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetIdentityZoneFromRemoteAction(inventory.APIGetIdentityZoneFromRemoteMsg):
    """Test-harness wrapper around APIGetIdentityZoneFromRemoteMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetIdentityZoneFromRemoteAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetIdentityZoneFromRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetImageCandidatesForVmToChangeAction(inventory.APIGetImageCandidatesForVmToChangeMsg):
    """Test-harness wrapper around APIGetImageCandidatesForVmToChangeMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetImageCandidatesForVmToChangeAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetImageCandidatesForVmToChangeAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetImageQgaAction(inventory.APIGetImageQgaMsg):
    """Test-harness wrapper around APIGetImageQgaMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetImageQgaAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetImageQgaAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetImagesFromImageStoreBackupStorageAction(inventory.APIGetImagesFromImageStoreBackupStorageMsg):
    """Test-harness wrapper around APIGetImagesFromImageStoreBackupStorageMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetImagesFromImageStoreBackupStorageAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetImagesFromImageStoreBackupStorageAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetInterdependentL3NetworksImagesAction(inventory.APIGetInterdependentL3NetworksImagesMsg):
    """Test-harness wrapper around APIGetInterdependentL3NetworksImagesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetInterdependentL3NetworksImagesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetInterdependentL3NetworksImagesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetIpAddressCapacityAction(inventory.APIGetIpAddressCapacityMsg):
    """Test-harness wrapper around APIGetIpAddressCapacityMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetIpAddressCapacityAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetIpAddressCapacityAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetL2NetworkTypesAction(inventory.APIGetL2NetworkTypesMsg):
    """Test-harness wrapper around APIGetL2NetworkTypesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetL2NetworkTypesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetL2NetworkTypesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetL3NetworkDhcpIpAddressAction(inventory.APIGetL3NetworkDhcpIpAddressMsg):
    """Test-harness wrapper around APIGetL3NetworkDhcpIpAddressMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetL3NetworkDhcpIpAddressAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetL3NetworkDhcpIpAddressAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetL3NetworkMtuAction(inventory.APIGetL3NetworkMtuMsg):
    """Test-harness wrapper around APIGetL3NetworkMtuMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetL3NetworkMtuAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetL3NetworkMtuAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetL3NetworkRouterInterfaceIpAction(inventory.APIGetL3NetworkRouterInterfaceIpMsg):
    """Test-harness wrapper around APIGetL3NetworkRouterInterfaceIpMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetL3NetworkRouterInterfaceIpAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetL3NetworkRouterInterfaceIpAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetL3NetworkTypesAction(inventory.APIGetL3NetworkTypesMsg):
    """Test-harness wrapper around APIGetL3NetworkTypesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetL3NetworkTypesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetL3NetworkTypesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetLdapEntryAction(inventory.APIGetLdapEntryMsg):
    """Test-harness wrapper around APIGetLdapEntryMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetLdapEntryAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetLdapEntryAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetLicenseCapabilitiesAction(inventory.APIGetLicenseCapabilitiesMsg):
    """Test-harness wrapper around APIGetLicenseCapabilitiesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetLicenseCapabilitiesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetLicenseCapabilitiesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetLicenseInfoAction(inventory.APIGetLicenseInfoMsg):
    """Test-harness wrapper around APIGetLicenseInfoMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetLicenseInfoAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetLicenseInfoAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetLocalStorageHostDiskCapacityAction(inventory.APIGetLocalStorageHostDiskCapacityMsg):
    """Test-harness wrapper around APIGetLocalStorageHostDiskCapacityMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetLocalStorageHostDiskCapacityAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetLocalStorageHostDiskCapacityAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetMetricDataAction(inventory.APIGetMetricDataMsg):
    """Test-harness wrapper around APIGetMetricDataMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetMetricDataAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetMetricDataAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetMetricLabelValueAction(inventory.APIGetMetricLabelValueMsg):
    """Test-harness wrapper around APIGetMetricLabelValueMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetMetricLabelValueAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetMetricLabelValueAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetMonitorItemAction(inventory.APIGetMonitorItemMsg):
    """Test-harness wrapper around APIGetMonitorItemMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetMonitorItemAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetMonitorItemAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetNetworkServiceTypesAction(inventory.APIGetNetworkServiceTypesMsg):
    """Test-harness wrapper around APIGetNetworkServiceTypesMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetNetworkServiceTypesAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetNetworkServiceTypesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out


class GetNicQosAction(inventory.APIGetNicQosMsg):
    """Test-harness wrapper around APIGetNicQosMsg; set sessionUuid before run()."""

    def __init__(self):
        super(GetNicQosAction, self).__init__()
        self.sessionUuid = None
        self.out = None

    def run(self):
        """Send the message via api.async_call; cache and return the event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetNicQosAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetOssBackupBucketFromRemoteAction(inventory.APIGetOssBackupBucketFromRemoteMsg):
    """Action wrapper that dispatches APIGetOssBackupBucketFromRemoteMsg asynchronously."""

    def __init__(self):
        super(GetOssBackupBucketFromRemoteAction, self).__init__()
        # Caller must set sessionUuid before invoking run().
        self.sessionUuid = None
        # Holds the API event once run() completes.
        self.out = None

    def run(self):
        """Send the message via api.async_call and return the resulting event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetOssBackupBucketFromRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetOssBucketFileFromRemoteAction(inventory.APIGetOssBucketFileFromRemoteMsg):
    """Action wrapper that dispatches APIGetOssBucketFileFromRemoteMsg asynchronously."""

    def __init__(self):
        super(GetOssBucketFileFromRemoteAction, self).__init__()
        # Caller must set sessionUuid before invoking run().
        self.sessionUuid = None
        # Holds the API event once run() completes.
        self.out = None

    def run(self):
        """Send the message via api.async_call and return the resulting event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetOssBucketFileFromRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetOssBucketNameFromRemoteAction(inventory.APIGetOssBucketNameFromRemoteMsg):
    """Action wrapper that dispatches APIGetOssBucketNameFromRemoteMsg asynchronously."""

    def __init__(self):
        super(GetOssBucketNameFromRemoteAction, self).__init__()
        # Caller must set sessionUuid before invoking run().
        self.sessionUuid = None
        # Holds the API event once run() completes.
        self.out = None

    def run(self):
        """Send the message via api.async_call and return the resulting event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetOssBucketNameFromRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetPciDeviceCandidatesForAttachingVmAction(inventory.APIGetPciDeviceCandidatesForAttachingVmMsg):
    """Action wrapper that dispatches APIGetPciDeviceCandidatesForAttachingVmMsg asynchronously."""

    def __init__(self):
        super(GetPciDeviceCandidatesForAttachingVmAction, self).__init__()
        # Caller must set sessionUuid before invoking run().
        self.sessionUuid = None
        # Holds the API event once run() completes.
        self.out = None

    def run(self):
        """Send the message via api.async_call and return the resulting event."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetPciDeviceCandidatesForAttachingVmAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetPortForwardingAttachableVmNicsAction(inventory.APIGetPortForwardingAttachableVmNicsMsg):
def __init__(self):
super(GetPortForwardingAttachableVmNicsAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[GetPortForwardingAttachableVmNicsAction] cannot be | |
import fnmatch
import glob
import os
import re
import tempfile
from gppylib import gplog
from gppylib.commands.base import WorkerPool, Command, REMOTE
from gppylib.commands.unix import Scp
from gppylib.db import dbconn
from gppylib.db.dbconn import execSQL
from gppylib.gparray import GpArray
from pygresql import pg
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
logger = gplog.get_default_logger()
def expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix):
    """Expand parent partitions to their leaves and write the union to a temp filter file.

    Returns the temp file's path (None when the resulting list is empty).
    """
    expanded = expand_partition_tables(dbname, partition_list)
    combined = list(set(expanded + partition_list))
    return create_temp_file_from_list(combined, file_prefix)
def populate_filter_tables(table, rows, non_partition_tables, partition_leaves):
    """Classify *table*: with no partition rows it is non-partitioned, otherwise
    record each (schema, leaf) row as 'schema.leaf' in partition_leaves.

    Both accumulator lists are mutated in place and returned as a tuple.
    """
    if rows:
        for schema_name, partition_leaf_name in rows:
            leaf = '%s.%s' % (schema_name.strip(), partition_leaf_name.strip())
            partition_leaves.append(leaf)
    else:
        non_partition_tables.append(table)
    return (non_partition_tables, partition_leaves)
def get_all_parent_tables(dbname):
    """Return the set of 'schema.table' names of every partitioned (parent) table in *dbname*."""
    SQL = "SELECT DISTINCT (schemaname || '.' || tablename) FROM pg_partitions"
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        rows = dbconn.execSQL(conn, SQL).fetchall()
    return set(row[0] for row in rows)
def list_to_quoted_string(filter_tables):
    """Render table names as a single-quoted, comma-separated SQL IN-list fragment."""
    escaped = [pg.escape_string(t) for t in filter_tables]
    return "'%s'" % "', '".join(escaped)
def convert_parents_to_leafs(dbname, parents):
    """Map parent partition tables to their deepest-level leaf partitions.

    parents -- list of 'schema.table' parent names (may be empty).
    Returns a list of 'schema.table' leaf partition names.
    """
    partition_leaves_sql = """
        SELECT x.partitionschemaname || '.' || x.partitiontablename
        FROM (
            SELECT distinct schemaname, tablename, partitionschemaname, partitiontablename, partitionlevel
            FROM pg_partitions
            WHERE schemaname || '.' || tablename in (%s)
        ) as X,
        (SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel
            FROM pg_partitions
            group by (tablename, schemaname)
        ) as Y
        WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel = Y.maxlevel;
    """
    if not parents:
        return []
    partition_sql = partition_leaves_sql % list_to_quoted_string(parents)
    # Use a context manager so the connection is always closed; the original
    # leaked it.  This also matches the style of get_all_parent_tables().
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        rows = dbconn.execSQL(conn, partition_sql).fetchall()
    return [r[0] for r in rows]
#input: list of tables to be filtered
#output: same list but parent tables converted to leafs
def expand_partition_tables(dbname, filter_tables):
    """Return *filter_tables* with every parent partition table replaced by its
    leaf partitions; non-parent tables pass through unchanged.

    Parents are converted in batches of 1000 to keep the generated SQL bounded.
    """
    if not filter_tables:
        return filter_tables
    all_parent_tables = get_all_parent_tables(dbname)
    parent_tables = [t for t in filter_tables if t in all_parent_tables]
    expanded_list = [t for t in filter_tables if t not in all_parent_tables]
    local_batch_size = 1000
    for start, end in get_batch_from_list(len(parent_tables), local_batch_size):
        expanded_list += convert_parents_to_leafs(dbname, parent_tables[start:end])
    return expanded_list
def get_batch_from_list(length, batch_size):
    """Return (start, end) index pairs covering range(length) in batch_size strides.

    The final pair's end may exceed *length*; callers slice with it, which is safe.
    """
    return [(start, start + batch_size) for start in range(0, length, batch_size)]
def create_temp_file_from_list(entries, prefix):
    """
    Write *entries* verbatim, one per line, to a named temp file and return its path.

    No stripping is done: schema and table names may legitimately contain
    whitespace.  Returns None for an empty list.  The written file is
    re-read and compared to catch IO errors.
    """
    if not entries:
        return None
    tmp = tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=False)
    for entry in entries:
        tmp.write(entry + '\n')
    tmp_file_name = tmp.name
    tmp.close()
    verify_lines_in_file(tmp_file_name, entries)
    return tmp_file_name
def create_temp_file_with_tables(table_list):
    """Write *table_list* to a 'table_list_'-prefixed temp file; return its path."""
    prefix = 'table_list_'
    return create_temp_file_from_list(table_list, prefix)
def create_temp_file_with_schemas(schema_list):
    """Write *schema_list* to a 'schema_file_'-prefixed temp file; return its path."""
    prefix = 'schema_file_'
    return create_temp_file_from_list(schema_list, prefix)
def validate_timestamp(timestamp):
    """Return True iff *timestamp* is a 14-digit string (YYYYMMDDHHMMSS shape)."""
    if not timestamp:
        return False
    return len(timestamp) == 14 and timestamp.isdigit()
def check_successful_dump(report_file_contents):
    """Return True iff the report contains the gp_dump success marker line."""
    success_marker = 'gp_dump utility finished successfully.'
    return any(line.strip() == success_marker for line in report_file_contents)
def get_ddboost_backup_directory():
    """
    Run 'gpddboost --show-config' and return the value of its
    'Default Backup Directory:' line.

    Raises Exception when the line is missing or carries an empty value.
    """
    cmd_str = 'gpddboost --show-config'
    cmd = Command('Get the ddboost backup directory', cmd_str)
    cmd.run(validateAfter=True)
    config = cmd.get_results().stdout.splitlines()
    for line in config:
        if not line.strip().startswith("Default Backup Directory:"):
            continue
        ddboost_dir = line.split(':')[-1].strip()
        if not ddboost_dir:
            logger.error("Expecting format: Default Backup Directory:<dir>")
            raise Exception("DDBOOST default backup directory is not configured. Or the format of the line has changed")
        return ddboost_dir
    logger.error("Could not find Default Backup Directory:<dir> in stdout")
    raise Exception("Output: %s from command %s not in expected format." % (config, cmd_str))
# raise exception for bad data
def convert_reportfilename_to_cdatabasefilename(report_file, dump_prefix, ddboost=False):
    """Derive the gp_cdatabase dump file path from a gp_dump report file path.

    With ddboost=True the directory is rebuilt from the configured DDBoost
    backup root plus the date portion of the timestamp.
    """
    dirname, fname = os.path.split(report_file)
    # The 14-char timestamp sits immediately before the '.rpt' extension.
    timestamp = fname[-18:-4]
    if ddboost:
        ddboost_root = get_ddboost_backup_directory()
        dirname = "%s/%s" % (ddboost_root, timestamp[0:8])
    return "%s/%sgp_cdatabase_1_1_%s" % (dirname, dump_prefix, timestamp)
def get_lines_from_dd_file(filename):
    """Fetch *filename* from DDBoost via gpddboost and return its lines."""
    cmd = Command('DDBoost copy of master dump file',
                  'gpddboost --readFile --from-file=%s' % (filename))
    cmd.run(validateAfter=True)
    return cmd.get_results().stdout.splitlines()
def check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
    """
    Return True iff the cdatabase dump file matching *report_file* contains a
    CREATE DATABASE statement for *dbname*.
    """
    try:
        filename = convert_reportfilename_to_cdatabasefilename(report_file, dump_prefix, ddboost)
    except Exception:
        return False
    if ddboost:
        cdatabase_contents = get_lines_from_dd_file(filename)
    else:
        # For NetBackup the file must first be restored locally, then it is
        # read the same way as a plain local dump file.
        if netbackup_service_host:
            restore_file_with_nbu(netbackup_service_host, netbackup_block_size, filename)
        cdatabase_contents = get_lines_from_file(filename)
    dbname = escapeDoubleQuoteInSQLString(dbname, forceDoubleQuote=False)
    for line in cdatabase_contents:
        if 'CREATE DATABASE' not in line:
            continue
        dump_dbname = get_dbname_from_cdatabaseline(line)
        # Keep scanning on a non-matching or unparseable line.
        if dump_dbname is not None and dbname == checkAndRemoveEnclosingDoubleQuote(dump_dbname):
            return True
    return False
def get_dbname_from_cdatabaseline(line):
    """
    Extract the (possibly double-quoted) database name from a line of the form:
        CREATE DATABASE "DBNAME" WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = gpadmin;

    The name is the substring between 'CREATE DATABASE ' and the single
    ' WITH TEMPLATE = ' occurrence that lies outside all double quotes.
    Because embedded quotes are escaped by doubling, that separator is the
    one with an even number of double quotes on each side (owner names may
    also contain quoted special characters).  Returns None when no such
    split point exists.
    """
    cdatabase = "CREATE DATABASE "
    try:
        start = line.index(cdatabase)
    except Exception as e:
        logger.error('Failed to find substring %s in line %s, error: %s' % (cdatabase, line, str(e)))
        return None
    with_template = " WITH TEMPLATE = "
    all_positions = get_all_occurrences(with_template, line)
    for pos in (all_positions or []):
        quotes_before = get_all_occurrences('"', line[:pos])
        quotes_after = get_all_occurrences('"', line[pos + len(with_template):])
        n_before = 0 if quotes_before is None else len(quotes_before)
        n_after = 0 if quotes_after is None else len(quotes_after)
        if n_before % 2 == 0 and n_after % 2 == 0:
            return line[start + len(cdatabase):pos]
    return None
def get_all_occurrences(substr, line):
    """
    Return the start indices of every (possibly overlapping) occurrence of
    *substr* in *line*; None when either argument is None or *substr* is
    longer than *line*.

    Bug fix: *substr* is now escaped with re.escape() before being embedded
    in the lookahead pattern.  Previously it was interpolated verbatim, so
    regex metacharacters (e.g. '.', '(', '*') were interpreted as regex
    syntax, producing wrong match positions or raising re.error.
    """
    if substr is None or line is None or len(substr) > len(line):
        return None
    # Zero-width lookahead so overlapping occurrences are all found.
    return [m.start() for m in re.finditer('(?=%s)' % re.escape(substr), line)]
def get_type_ts_from_report_file(dbname, report_file, backup_type, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
    """Return the report's timestamp when it is a successful *backup_type*
    dump of *dbname*; otherwise None."""
    contents = get_lines_from_file(report_file)
    if not check_successful_dump(contents):
        return None
    if not check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost,
                                  netbackup_service_host, netbackup_block_size):
        return None
    if check_backup_type(contents, backup_type):
        return get_timestamp_val(contents)
    return None
def get_full_ts_from_report_file(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
    """Timestamp of a successful Full dump report for *dbname*, or None."""
    return get_type_ts_from_report_file(dbname, report_file, 'Full', dump_prefix,
                                        ddboost, netbackup_service_host, netbackup_block_size)
def get_incremental_ts_from_report_file(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None):
    """Timestamp of a successful Incremental dump report for *dbname*, or None."""
    return get_type_ts_from_report_file(dbname, report_file, 'Incremental', dump_prefix,
                                        ddboost, netbackup_service_host, netbackup_block_size)
def get_timestamp_val(report_file_contents):
    """Return the 14-digit timestamp from the first 'Timestamp Key' line,
    or None when no such line exists.

    Raises Exception when the line's value fails validate_timestamp().
    """
    for line in report_file_contents:
        if not line.startswith('Timestamp Key'):
            continue
        timestamp = line.split(':')[-1].strip()
        if not validate_timestamp(timestamp):
            raise Exception('Invalid timestamp value found in report_file')
        return timestamp
    return None
def check_backup_type(report_file_contents, backup_type):
    """Return True iff some 'Backup Type' line carries exactly *backup_type*."""
    marker = 'Backup Type'
    for line in report_file_contents:
        if line.startswith(marker) and line.split(':')[-1].strip() == backup_type:
            return True
    return False
def get_lines_from_file(fname, ddboost=None):
    """
    Return the file's lines with newline characters stripped.

    Other whitespace is preserved, since it may be part of a schema or
    table name.  With *ddboost* set, the file is fetched via gpddboost
    instead of the local filesystem.
    """
    if ddboost:
        return get_lines_from_dd_file(fname)
    with open(fname) as fd:
        return [line.strip('\n') for line in fd]
def write_lines_to_file(filename, lines):
    """
    Write *lines* to *filename*, one per line.

    Only newline characters are stripped before writing: other whitespace
    may be part of a schema or table name and must be preserved.
    """
    with open(filename, 'w') as fp:
        fp.writelines('%s\n' % line.strip('\n') for line in lines)
def verify_lines_in_file(fname, expected):
    """Re-read *fname* and raise when its lines differ from *expected* (IO sanity check)."""
    actual = get_lines_from_file(fname)
    if actual != expected:
        raise Exception("After writing file '%s' contents not as expected.\n"
                        "Lines read from file %s\n"
                        "Lines expected from file %s\n"
                        "Suspected IO error" % (fname, actual, expected))
def check_dir_writable(directory):
    """Verify *directory* is writable by creating and removing a probe file.

    Raises Exception (with a permission-specific message for IOError) when
    the probe cannot be created.  Returns None on success.

    Fix: tmp_file is initialized before the try block and guarded in the
    finally clause; previously, an exception raised before the assignment
    would have produced a NameError in finally, masking the real error.
    """
    fp = None
    tmp_file = None
    try:
        tmp_file = os.path.join(directory, 'tmp_file')
        fp = open(tmp_file, 'w')
    except IOError:
        raise Exception('No write access permission on %s' % directory)
    except Exception as e:
        raise Exception(str(e))
    finally:
        # Always close and remove the probe file, even on failure.
        if fp is not None:
            fp.close()
        if tmp_file is not None and os.path.isfile(tmp_file):
            os.remove(tmp_file)
def execute_sql(query, master_port, dbname):
    """Run *query* against *dbname* on the master and return all result rows.

    Fix: the connection is now closed via a context manager; previously it
    was leaked.
    """
    dburl = dbconn.DbURL(port=master_port, dbname=dbname)
    with dbconn.connect(dburl) as conn:
        cursor = execSQL(conn, query)
        return cursor.fetchall()
def get_backup_directory(master_data_dir, backup_dir, dump_dir, timestamp):
    """
    Return '<root>/<dump_dir>/<YYYYMMDD>' where root is backup_dir when set,
    else master_data_dir.

    Raises Exception when no root directory is available, or when the
    timestamp is missing or not a valid 14-digit value.
    """
    use_dir = backup_dir or master_data_dir
    if not use_dir:
        raise Exception("Can not locate backup directory with existing parameters")
    if not timestamp:
        raise Exception("Can not locate backup directory without timestamp")
    if not validate_timestamp(timestamp):
        raise Exception('Invalid timestamp: "%s"' % timestamp)
    return "%s/%s/%s" % (use_dir, dump_dir, timestamp[0:8])
def generate_schema_filename(master_data_dir, backup_dir, dump_dir, dump_prefix, timestamp, ddboost=False):
    """Path of the '<prefix>gp_dump_<ts>_schema' file for this backup."""
    if ddboost:
        use_dir = "%s/%s/%s" % (master_data_dir, dump_dir, timestamp[0:8])
    else:
        use_dir = get_backup_directory(master_data_dir, backup_dir, dump_dir, timestamp)
    return "%s/%sgp_dump_%s_schema" % (use_dir, dump_prefix, timestamp)
def generate_report_filename(master_data_dir, backup_dir, dump_dir, dump_prefix, timestamp, ddboost=False):
    """Path of the '<prefix>gp_dump_<ts>.rpt' report file for this backup."""
    if ddboost:
        use_dir = "%s/%s/%s" % (master_data_dir, dump_dir, timestamp[0:8])
    else:
        use_dir = get_backup_directory(master_data_dir, backup_dir, dump_dir, timestamp)
    return "%s/%sgp_dump_%s.rpt" % (use_dir, dump_prefix, timestamp)
def generate_increments_filename(master_data_dir, backup_dir, dump_dir, dump_prefix, timestamp, ddboost=False):
    """Path of the '<prefix>gp_dump_<ts>_increments' file for this backup."""
    if ddboost:
        use_dir = "%s/%s/%s" % (master_data_dir, dump_dir, timestamp[0:8])
    else:
        use_dir = get_backup_directory(master_data_dir, backup_dir, dump_dir, timestamp)
    return "%s/%sgp_dump_%s_increments" % (use_dir, dump_prefix, timestamp)
def generate_pgstatlastoperation_filename(master_data_dir, backup_dir, dump_dir, dump_prefix, timestamp, ddboost=False):
    """Path of the '<prefix>gp_dump_<ts>_last_operation' file for this backup."""
    if ddboost:
        use_dir = "%s/%s/%s" % (master_data_dir, dump_dir, timestamp[0:8])
    else:
        use_dir = get_backup_directory(master_data_dir, backup_dir, dump_dir, timestamp)
    return "%s/%sgp_dump_%s_last_operation" % (use_dir, dump_prefix, timestamp)
def generate_dirtytable_filename(master_data_dir, | |
255);")
self.no3_dr.setText("")
self.no3_dr.setAlignment(QtCore.Qt.AlignCenter)
self.no3_dr.setObjectName("no3_dr")
self.no5_dr = QtWidgets.QLabel(self.th3dr)
self.no5_dr.setGeometry(QtCore.QRect(540, 400, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no5_dr.setFont(font)
self.no5_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.no5_dr.setText("")
self.no5_dr.setAlignment(QtCore.Qt.AlignCenter)
self.no5_dr.setObjectName("no5_dr")
self.a2_dr = QtWidgets.QLabel(self.th3dr)
self.a2_dr.setGeometry(QtCore.QRect(390, 250, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a2_dr.setFont(font)
self.a2_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.a2_dr.setText("")
self.a2_dr.setAlignment(QtCore.Qt.AlignCenter)
self.a2_dr.setObjectName("a2_dr")
self.n1_dr = QtWidgets.QLabel(self.th3dr)
self.n1_dr.setGeometry(QtCore.QRect(230, 200, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n1_dr.setFont(font)
self.n1_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n1_dr.setText("")
self.n1_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n1_dr.setObjectName("n1_dr")
self.no0_dr = QtWidgets.QLabel(self.th3dr)
self.no0_dr.setGeometry(QtCore.QRect(540, 150, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no0_dr.setFont(font)
self.no0_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.no0_dr.setText("")
self.no0_dr.setAlignment(QtCore.Qt.AlignCenter)
self.no0_dr.setObjectName("no0_dr")
self.a5_dr = QtWidgets.QLabel(self.th3dr)
self.a5_dr.setGeometry(QtCore.QRect(390, 400, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a5_dr.setFont(font)
self.a5_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.a5_dr.setText("")
self.a5_dr.setAlignment(QtCore.Qt.AlignCenter)
self.a5_dr.setObjectName("a5_dr")
self.n6_dr = QtWidgets.QLabel(self.th3dr)
self.n6_dr.setGeometry(QtCore.QRect(230, 450, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n6_dr.setFont(font)
self.n6_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n6_dr.setText("")
self.n6_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n6_dr.setObjectName("n6_dr")
self.n3_dr = QtWidgets.QLabel(self.th3dr)
self.n3_dr.setGeometry(QtCore.QRect(230, 300, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n3_dr.setFont(font)
self.n3_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n3_dr.setText("")
self.n3_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n3_dr.setObjectName("n3_dr")
self.copy_manage2_dr = QtWidgets.QPushButton(self.th3dr)
self.copy_manage2_dr.setGeometry(QtCore.QRect(760, 200, 31, 31))
self.copy_manage2_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage2_dr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(255, 255, 255);")
self.copy_manage2_dr.setText("")
self.copy_manage2_dr.setIcon(icon9)
self.copy_manage2_dr.setIconSize(QtCore.QSize(30, 30))
self.copy_manage2_dr.setObjectName("copy_manage2_dr")
self.no1_dr = QtWidgets.QLabel(self.th3dr)
self.no1_dr.setGeometry(QtCore.QRect(540, 200, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no1_dr.setFont(font)
self.no1_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.no1_dr.setText("")
self.no1_dr.setAlignment(QtCore.Qt.AlignCenter)
self.no1_dr.setObjectName("no1_dr")
self.copy_manage6_dr = QtWidgets.QPushButton(self.th3dr)
self.copy_manage6_dr.setGeometry(QtCore.QRect(760, 400, 31, 31))
self.copy_manage6_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage6_dr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(255, 255, 255);")
self.copy_manage6_dr.setText("")
self.copy_manage6_dr.setIcon(icon9)
self.copy_manage6_dr.setIconSize(QtCore.QSize(30, 30))
self.copy_manage6_dr.setObjectName("copy_manage6_dr")
self.n2_dr = QtWidgets.QLabel(self.th3dr)
self.n2_dr.setGeometry(QtCore.QRect(230, 250, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n2_dr.setFont(font)
self.n2_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n2_dr.setText("")
self.n2_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n2_dr.setObjectName("n2_dr")
self.a4_dr = QtWidgets.QLabel(self.th3dr)
self.a4_dr.setGeometry(QtCore.QRect(390, 350, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a4_dr.setFont(font)
self.a4_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.a4_dr.setText("")
self.a4_dr.setAlignment(QtCore.Qt.AlignCenter)
self.a4_dr.setObjectName("a4_dr")
self.n4_dr = QtWidgets.QLabel(self.th3dr)
self.n4_dr.setGeometry(QtCore.QRect(230, 350, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n4_dr.setFont(font)
self.n4_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n4_dr.setText("")
self.n4_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n4_dr.setObjectName("n4_dr")
self.copy_manage4_dr = QtWidgets.QPushButton(self.th3dr)
self.copy_manage4_dr.setGeometry(QtCore.QRect(760, 300, 31, 31))
self.copy_manage4_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage4_dr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(255, 255, 255);")
self.copy_manage4_dr.setText("")
self.copy_manage4_dr.setIcon(icon9)
self.copy_manage4_dr.setIconSize(QtCore.QSize(30, 30))
self.copy_manage4_dr.setObjectName("copy_manage4_dr")
self.n5_dr = QtWidgets.QLabel(self.th3dr)
self.n5_dr.setGeometry(QtCore.QRect(230, 400, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n5_dr.setFont(font)
self.n5_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n5_dr.setText("")
self.n5_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n5_dr.setObjectName("n5_dr")
self.no4_dr = QtWidgets.QLabel(self.th3dr)
self.no4_dr.setGeometry(QtCore.QRect(540, 350, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no4_dr.setFont(font)
self.no4_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.no4_dr.setText("")
self.no4_dr.setAlignment(QtCore.Qt.AlignCenter)
self.no4_dr.setObjectName("no4_dr")
self.n0_dr = QtWidgets.QLabel(self.th3dr)
self.n0_dr.setGeometry(QtCore.QRect(230, 150, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n0_dr.setFont(font)
self.n0_dr.setStyleSheet("background-color: rgba(224, 224, 224, 60);\n"
"color: rgb(255, 255, 255);")
self.n0_dr.setText("")
self.n0_dr.setAlignment(QtCore.Qt.AlignCenter)
self.n0_dr.setObjectName("n0_dr")
self.copy_manage5_dr = QtWidgets.QPushButton(self.th3dr)
self.copy_manage5_dr.setGeometry(QtCore.QRect(760, 350, 31, 31))
self.copy_manage5_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage5_dr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(255, 255, 255);")
self.copy_manage5_dr.setText("")
self.copy_manage5_dr.setIcon(icon9)
self.copy_manage5_dr.setIconSize(QtCore.QSize(30, 30))
self.copy_manage5_dr.setObjectName("copy_manage5_dr")
self.showall_dr = QtWidgets.QPushButton(self.th3dr)
self.showall_dr.setGeometry(QtCore.QRect(390, 500, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(13)
self.showall_dr.setFont(font)
self.showall_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.showall_dr.setStyleSheet("background-color: rgb(94, 181, 247);\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.showall_dr.setIconSize(QtCore.QSize(30, 30))
self.showall_dr.setObjectName("showall_dr")
self.closegradient_9 = QtWidgets.QPushButton(self.th3dr)
self.closegradient_9.setGeometry(QtCore.QRect(770, 10, 31, 21))
self.closegradient_9.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.closegradient_9.setStyleSheet("background-color: none;\n"
"")
self.closegradient_9.setText("")
self.closegradient_9.setIcon(icon8)
self.closegradient_9.setIconSize(QtCore.QSize(36, 37))
self.closegradient_9.setObjectName("closegradient_9")
self.mingradient_9 = QtWidgets.QPushButton(self.th3dr)
self.mingradient_9.setGeometry(QtCore.QRect(730, 0, 41, 31))
self.mingradient_9.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mingradient_9.setStyleSheet("background-color: none;\n"
"color: rgb(94, 181, 247);\n"
"")
self.mingradient_9.setText("")
self.mingradient_9.setIcon(icon7)
self.mingradient_9.setIconSize(QtCore.QSize(31, 37))
self.mingradient_9.setObjectName("mingradient_9")
self.stackedWidget.addWidget(self.th3dr)
self.fivedr = QtWidgets.QWidget()
self.fivedr.setStyleSheet("background-color: rgb(14, 22, 33);\n"
"border-radius: 15px;")
self.fivedr.setObjectName("fivedr")
self.frame_16 = QtWidgets.QFrame(self.fivedr)
self.frame_16.setGeometry(QtCore.QRect(0, 0, 201, 561))
self.frame_16.setStyleSheet("border-radius:15px;\n"
"background-color: rgb(23, 33, 43);")
self.frame_16.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_16.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_16.setObjectName("frame_16")
self.label_38 = QtWidgets.QLabel(self.frame_16)
self.label_38.setGeometry(QtCore.QRect(0, 0, 201, 51))
font = QtGui.QFont()
font.setFamily("Bauhaus 93")
font.setPointSize(16)
font.setUnderline(False)
self.label_38.setFont(font)
self.label_38.setStyleSheet("color: #5eb5f7;\n"
"border-radius: 8px;")
self.label_38.setAlignment(QtCore.Qt.AlignCenter)
self.label_38.setObjectName("label_38")
self.gener5_dr = QtWidgets.QPushButton(self.frame_16)
self.gener5_dr.setGeometry(QtCore.QRect(20, 90, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.gener5_dr.setFont(font)
self.gener5_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.gener5_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.gener5_dr.setIconSize(QtCore.QSize(30, 30))
self.gener5_dr.setObjectName("gener5_dr")
self.insert5_dr = QtWidgets.QPushButton(self.frame_16)
self.insert5_dr.setGeometry(QtCore.QRect(20, 150, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.insert5_dr.setFont(font)
self.insert5_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.insert5_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.insert5_dr.setIconSize(QtCore.QSize(30, 30))
self.insert5_dr.setObjectName("insert5_dr")
self.manage5_dr = QtWidgets.QPushButton(self.frame_16)
self.manage5_dr.setGeometry(QtCore.QRect(20, 210, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.manage5_dr.setFont(font)
self.manage5_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.manage5_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.manage5_dr.setIconSize(QtCore.QSize(30, 30))
self.manage5_dr.setObjectName("manage5_dr")
self.setti5_dr = QtWidgets.QPushButton(self.frame_16)
self.setti5_dr.setGeometry(QtCore.QRect(20, 270, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.setti5_dr.setFont(font)
self.setti5_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.setti5_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.setti5_dr.setIconSize(QtCore.QSize(30, 30))
self.setti5_dr.setObjectName("setti5_dr")
self.about5_dr = QtWidgets.QPushButton(self.frame_16)
self.about5_dr.setGeometry(QtCore.QRect(20, 330, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.about5_dr.setFont(font)
self.about5_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.about5_dr.setStyleSheet("background-color: rgb(43, 82, 120);\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.about5_dr.setIconSize(QtCore.QSize(30, 30))
self.about5_dr.setObjectName("about5_dr")
self.logout5dr = QtWidgets.QPushButton(self.frame_16)
self.logout5dr.setGeometry(QtCore.QRect(20, 390, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.logout5dr.setFont(font)
self.logout5dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.logout5dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.logout5dr.setIconSize(QtCore.QSize(30, 30))
self.logout5dr.setObjectName("logout5dr")
self.mode5dr = QtWidgets.QPushButton(self.frame_16)
self.mode5dr.setGeometry(QtCore.QRect(20, 450, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.mode5dr.setFont(font)
self.mode5dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode5dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.mode5dr.setText("")
self.mode5dr.setIconSize(QtCore.QSize(30, 30))
self.mode5dr.setObjectName("mode5dr")
self.mode_lig5 = QtWidgets.QPushButton(self.frame_16)
self.mode_lig5.setGeometry(QtCore.QRect(130, 450, 51, 41))
self.mode_lig5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode_lig5.setStyleSheet("background-color: none;")
self.mode_lig5.setText("")
self.mode_lig5.setIcon(icon1)
self.mode_lig5.setIconSize(QtCore.QSize(31, 32))
self.mode_lig5.setObjectName("mode_lig5")
self.lb_dark5 = QtWidgets.QPushButton(self.frame_16)
self.lb_dark5.setGeometry(QtCore.QRect(20, 450, 111, 41))
self.lb_dark5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.lb_dark5.setStyleSheet("background-color: rgb(43, 82, 120);\n"
"border-radius: 8px;\n"
"border-top-right-radius: 0px;\n"
"border-bottom-right-radius: 0px;")
self.lb_dark5.setText("")
self.lb_dark5.setIcon(icon6)
self.lb_dark5.setIconSize(QtCore.QSize(31, 32))
self.lb_dark5.setObjectName("lb_dark5")
self.label_40 = QtWidgets.QLabel(self.fivedr)
self.label_40.setGeometry(QtCore.QRect(230, 30, 451, 41))
font = QtGui.QFont()
font.setFamily("Barlow Condensed ExtraBold")
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_40.setFont(font)
self.label_40.setStyleSheet("background-color: none;\n"
"color: #5eb5f7;")
self.label_40.setObjectName("label_40")
self.githubdr = QtWidgets.QPushButton(self.fivedr)
self.githubdr.setGeometry(QtCore.QRect(280, 500, 141, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.githubdr.setFont(font)
self.githubdr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.githubdr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(247, 247, 247);")
self.githubdr.setIconSize(QtCore.QSize(30, 30))
self.githubdr.setObjectName("githubdr")
self.contact_dr = QtWidgets.QPushButton(self.fivedr)
self.contact_dr.setGeometry(QtCore.QRect(440, 500, 141, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.contact_dr.setFont(font)
self.contact_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.contact_dr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(247, 247, 247);")
self.contact_dr.setIconSize(QtCore.QSize(30, 30))
self.contact_dr.setObjectName("contact_dr")
self.social_dr = QtWidgets.QPushButton(self.fivedr)
self.social_dr.setGeometry(QtCore.QRect(600, 500, 141, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.social_dr.setFont(font)
self.social_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.social_dr.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"color: rgb(247, 247, 247);")
self.social_dr.setIconSize(QtCore.QSize(30, 30))
self.social_dr.setObjectName("social_dr")
self.closegradient_10 = QtWidgets.QPushButton(self.fivedr)
self.closegradient_10.setGeometry(QtCore.QRect(770, 10, 31, 21))
self.closegradient_10.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.closegradient_10.setStyleSheet("background-color: none;\n"
"")
self.closegradient_10.setText("")
self.closegradient_10.setIcon(icon8)
self.closegradient_10.setIconSize(QtCore.QSize(36, 37))
self.closegradient_10.setObjectName("closegradient_10")
self.mingradient_10 = QtWidgets.QPushButton(self.fivedr)
self.mingradient_10.setGeometry(QtCore.QRect(730, 0, 41, 31))
self.mingradient_10.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mingradient_10.setStyleSheet("background-color: none;\n"
"color: rgb(94, 181, 247);\n"
"")
self.mingradient_10.setText("")
self.mingradient_10.setIcon(icon7)
self.mingradient_10.setIconSize(QtCore.QSize(31, 37))
self.mingradient_10.setObjectName("mingradient_10")
self.frame_17 = QtWidgets.QFrame(self.fivedr)
self.frame_17.setGeometry(QtCore.QRect(230, 290, 561, 171))
self.frame_17.setStyleSheet("background-color: rgba(24, 37, 51, 100);\n"
"border-radius: 20px;")
self.frame_17.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_17.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_17.setObjectName("frame_17")
self.label_26 = QtWidgets.QLabel(self.frame_17)
self.label_26.setGeometry(QtCore.QRect(-30, 80, 621, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.label_26.setFont(font)
self.label_26.setStyleSheet("background-color: none;\n"
"color: rgb(247, 247, 247);")
self.label_26.setAlignment(QtCore.Qt.AlignCenter)
self.label_26.setObjectName("label_26")
self.label_29 = QtWidgets.QLabel(self.frame_17)
self.label_29.setGeometry(QtCore.QRect(-30, 40, 621, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.label_29.setFont(font)
self.label_29.setStyleSheet("background-color: none;\n"
"color: rgb(247, 247, 247);")
self.label_29.setAlignment(QtCore.Qt.AlignCenter)
self.label_29.setObjectName("label_29")
self.label_31 = QtWidgets.QLabel(self.frame_17)
self.label_31.setGeometry(QtCore.QRect(-30, 120, 621, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.label_31.setFont(font)
self.label_31.setStyleSheet("background-color: none;\n"
"color: rgb(247, 247, 247);")
self.label_31.setAlignment(QtCore.Qt.AlignCenter)
self.label_31.setObjectName("label_31")
self.label_33 = QtWidgets.QLabel(self.frame_17)
self.label_33.setGeometry(QtCore.QRect(-30, 0, 621, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.label_33.setFont(font)
self.label_33.setStyleSheet("background-color: none;\n"
"color: rgb(247, 247, 247);")
self.label_33.setAlignment(QtCore.Qt.AlignCenter)
self.label_33.setObjectName("label_33")
self.label_7 = QtWidgets.QLabel(self.fivedr)
self.label_7.setGeometry(QtCore.QRect(210, 80, 611, 211))
self.label_7.setStyleSheet("background-color: none;")
self.label_7.setText("")
self.label_7.setPixmap(QtGui.QPixmap("about_pic_dr.png"))
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.frame_17.raise_()
self.frame_16.raise_()
self.label_40.raise_()
self.githubdr.raise_()
self.contact_dr.raise_()
self.social_dr.raise_()
self.closegradient_10.raise_()
self.mingradient_10.raise_()
self.label_7.raise_()
self.stackedWidget.addWidget(self.fivedr)
self.six_dr = QtWidgets.QWidget()
self.six_dr.setStyleSheet("background-color: rgb(14, 22, 33);\n"
"border-radius: 15px;")
self.six_dr.setObjectName("six_dr")
self.frame_19 = QtWidgets.QFrame(self.six_dr)
self.frame_19.setGeometry(QtCore.QRect(0, 0, 201, 561))
self.frame_19.setStyleSheet("border-radius:15px;\n"
"background-color: rgb(23, 33, 43);")
self.frame_19.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_19.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_19.setObjectName("frame_19")
self.label_44 = QtWidgets.QLabel(self.frame_19)
self.label_44.setGeometry(QtCore.QRect(0, 0, 201, 51))
font = QtGui.QFont()
font.setFamily("Bauhaus 93")
font.setPointSize(16)
font.setUnderline(False)
self.label_44.setFont(font)
self.label_44.setStyleSheet("color: #5eb5f7;\n"
"border-radius: 8px;")
self.label_44.setAlignment(QtCore.Qt.AlignCenter)
self.label_44.setObjectName("label_44")
self.gener6_dr = QtWidgets.QPushButton(self.frame_19)
self.gener6_dr.setGeometry(QtCore.QRect(20, 90, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.gener6_dr.setFont(font)
self.gener6_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.gener6_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.gener6_dr.setIconSize(QtCore.QSize(30, 30))
self.gener6_dr.setObjectName("gener6_dr")
self.insert6_dr = QtWidgets.QPushButton(self.frame_19)
self.insert6_dr.setGeometry(QtCore.QRect(20, 150, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.insert6_dr.setFont(font)
self.insert6_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.insert6_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.insert6_dr.setIconSize(QtCore.QSize(30, 30))
self.insert6_dr.setObjectName("insert6_dr")
self.manage6_dr = QtWidgets.QPushButton(self.frame_19)
self.manage6_dr.setGeometry(QtCore.QRect(20, 210, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.manage6_dr.setFont(font)
self.manage6_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.manage6_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.manage6_dr.setIconSize(QtCore.QSize(30, 30))
self.manage6_dr.setObjectName("manage6_dr")
self.setti6_dr = QtWidgets.QPushButton(self.frame_19)
self.setti6_dr.setGeometry(QtCore.QRect(20, 270, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.setti6_dr.setFont(font)
self.setti6_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.setti6_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.setti6_dr.setIconSize(QtCore.QSize(30, 30))
self.setti6_dr.setObjectName("setti6_dr")
self.about6_dr = QtWidgets.QPushButton(self.frame_19)
self.about6_dr.setGeometry(QtCore.QRect(20, 330, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.about6_dr.setFont(font)
self.about6_dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.about6_dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.about6_dr.setIconSize(QtCore.QSize(30, 30))
self.about6_dr.setObjectName("about6_dr")
self.logout6dr = QtWidgets.QPushButton(self.frame_19)
self.logout6dr.setGeometry(QtCore.QRect(20, 450, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.logout6dr.setFont(font)
self.logout6dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.logout6dr.setStyleSheet("background-color: #26313c;\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.logout6dr.setIconSize(QtCore.QSize(30, 30))
self.logout6dr.setObjectName("logout6dr")
self.priv6dr = QtWidgets.QPushButton(self.frame_19)
self.priv6dr.setGeometry(QtCore.QRect(20, 390, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.priv6dr.setFont(font)
self.priv6dr.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.priv6dr.setStyleSheet("background-color: rgb(43, 82, 120);\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.priv6dr.setIconSize(QtCore.QSize(30, 30))
self.priv6dr.setObjectName("priv6dr")
self.label_45 = QtWidgets.QLabel(self.six_dr)
self.label_45.setGeometry(QtCore.QRect(200, 470, 621, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.label_45.setFont(font)
self.label_45.setStyleSheet("background-color: none;\n"
"color: rgb(255, 255, 255);")
self.label_45.setAlignment(QtCore.Qt.AlignCenter)
self.label_45.setObjectName("label_45")
self.label_46 = QtWidgets.QLabel(self.six_dr)
self.label_46.setGeometry(QtCore.QRect(200, 390, 621, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.label_46.setFont(font)
self.label_46.setStyleSheet("background-color: none;\n"
"color: rgb(255, 255, 255);")
self.label_46.setAlignment(QtCore.Qt.AlignCenter)
self.label_46.setObjectName("label_46")
self.label_47 = QtWidgets.QLabel(self.six_dr)
# --- (extraction artifact: boundary between unrelated file fragments) ---
pos_adjustment
if length <= 0:
# We got all the data we need, go back immediately
yield Transition(None, whence)
trans = Transition(stream_event, self)
@coroutine
def _invalid_handler(type_octet, ctx):
    """Placeholder co-routine for invalid type codes.

    Args:
        type_octet (int): The type octet that has no valid handler.
        ctx: The handler context (unused; present for dispatch-table uniformity).
    """
    # Prime per the coroutine protocol, then fail on the first real resume.
    yield
    raise IonException('Invalid type octet: 0x%02X' % type_octet)
@coroutine
def _var_uint_field_handler(handler, ctx):
    """Handler co-routine for variable-length unsigned integer fields.

    Invokes the given ``handler`` function with the read field and context,
    then immediately yields to the resulting co-routine.
    """
    _, self = yield
    queue = ctx.queue
    value = 0
    while True:
        if len(queue) == 0:
            # We don't know when the field ends, so read at least one byte.
            yield ctx.read_data_transition(1, self)
        octet = queue.read_byte()
        # Shift in this octet's payload bits; most-significant group arrives first.
        value <<= _VAR_INT_VALUE_BITS
        value |= octet & _VAR_INT_VALUE_MASK
        # The signal bit marks the final octet of the field.
        if octet & _VAR_INT_SIGNAL_MASK:
            break
    yield ctx.immediate_transition(handler(value, ctx))
@coroutine
def _ivm_handler(ctx):
    """Handler co-routine for the Ion Version Marker (IVM).

    Reads and verifies the octets following the IVM start octet, then emits
    the version-marker event.  IVMs are only legal at the top level.
    """
    _, self = yield
    if ctx.depth != 0:
        raise IonException('IVM encountered below top-level')
    yield ctx.read_data_transition(_IVM_TAIL_LEN, self)
    ivm_tail = ctx.queue.read(_IVM_TAIL_LEN)
    if _IVM_TAIL != ivm_tail:
        raise IonException('Invalid IVM tail: %r' % ivm_tail)
    yield Transition(ION_VERSION_MARKER_EVENT, ctx.whence)
@coroutine
def _nop_pad_handler(ion_type, length, ctx):
    """Handler co-routine for NOP padding: skips ``length`` octets and returns.

    ``ion_type`` is unused; it is present for dispatch-table uniformity.
    """
    yield
    # Inside a struct, a NOP pad may only carry the symbol-zero field name.
    if ctx.field_name is not None and ctx.field_name != SYMBOL_ZERO_TOKEN:
        raise IonException(
            'Cannot have NOP pad with non-zero symbol field, field SID %d' % ctx.field_name)
    if length > 0:
        yield ctx.read_data_transition(length, ctx.whence, skip=True)
    # Nothing to skip, so we just go back from whence we came...
    yield ctx.immediate_transition()
@coroutine
def _static_scalar_handler(ion_type, value, ctx):
    """Handler co-routine for scalars whose value is fully implied by the type
    octet (bound below for typed nulls and other static values); emits a
    single SCALAR event."""
    yield
    yield ctx.event_transition(IonEvent, IonEventType.SCALAR, ion_type, value)
@coroutine
def _length_scalar_handler(scalar_factory, ion_type, length, ctx):
    """Handles scalars, ``scalar_factory`` is a function that returns a value or thunk."""
    _, self = yield
    if length == 0:
        # Zero-length scalars have no body to read.
        data = b''
    else:
        yield ctx.read_data_transition(length, self)
        data = ctx.queue.read(length)
    scalar = scalar_factory(data)
    event_cls = IonEvent
    if callable(scalar):
        # A callable result is a thunk: parsing is deferred until the event's
        # value is actually requested, so wrap it in a thunk event.
        # TODO Wrap the exception to get context position.
        event_cls = IonThunkEvent
    yield ctx.event_transition(event_cls, IonEventType.SCALAR, ion_type, scalar)
@coroutine
def _start_type_handler(field_name, whence, ctx, expects_ivm=False, at_top=False, annotations=None):
    """Reads a single type octet and delegates to the handler bound to it.

    Args:
        field_name: The field name token for the value, or ``None``.
        whence: The co-routine to return to once the value is finished.
        ctx: The enclosing handler context.
        expects_ivm (bool): When True, the octet must start an IVM.
        at_top (bool): True at the top level, where running out of data is a
            normal stream end rather than an incomplete value.
        annotations: Annotation tokens to attach to the child context.
    """
    _, self = yield
    child_position = ctx.queue.position
    # Read type byte.
    if at_top:
        incomplete_event = ION_STREAM_END_EVENT
    else:
        incomplete_event = ION_STREAM_INCOMPLETE_EVENT
    yield ctx.read_data_transition(1, self, stream_event=incomplete_event)
    type_octet = ctx.queue.read_byte()
    if expects_ivm and type_octet != _IVM_START_OCTET:
        raise IonException(
            'Expected binary version marker, got: %02X' % type_octet)
    # Dispatch on the octet; invalid octets are bound to _invalid_handler.
    handler = _HANDLER_DISPATCH_TABLE[type_octet]
    child_ctx = ctx.derive_child_context(child_position, field_name, annotations, whence)
    yield ctx.immediate_transition(handler(child_ctx))
@coroutine
def _annotation_handler(ion_type, length, ctx):
    """Handles annotation wrappers. ``ion_type`` is ignored.

    Reads the annotation SIDs, then delegates to the wrapped value's handler
    with the annotations attached.
    """
    _, self = yield
    # Delegate that returns control (and the parsed subfield) to this co-routine.
    self_handler = _create_delegate_handler(self)
    if ctx.annotations is not None:
        raise IonException('Annotation cannot be nested in annotations')
    # We have to replace our context for annotations specifically to encapsulate the limit
    ctx = ctx.derive_container_context(length, add_depth=0)
    # Immediately read the length field and the annotations
    (ann_length, _), _ = yield ctx.immediate_transition(
        _var_uint_field_handler(self_handler, ctx)
    )
    if ann_length < 1:
        raise IonException('Invalid annotation length subfield; annotation wrapper must have at least one annotation.')
    # Read/parse the annotations.
    yield ctx.read_data_transition(ann_length, self)
    ann_data = ctx.queue.read(ann_length)
    annotations = tuple(_parse_sid_iter(ann_data))
    if ctx.limit - ctx.queue.position < 1:
        # There is no space left for the 'value' subfield, which is required.
        raise IonException('Incorrect annotation wrapper length.')
    # Go parse the start of the value but go back to the real parent container.
    yield ctx.immediate_transition(
        _start_type_handler(ctx.field_name, ctx.whence, ctx, annotations=annotations)
    )
@coroutine
def _ordered_struct_start_handler(handler, ctx):
    """Handles the special case of ordered structs, specified by the type ID 0xD1.

    This coroutine's only purpose is to ensure that the struct in question declares at least one field name/value pair,
    as required by the spec.
    """
    _, self = yield
    self_handler = _create_delegate_handler(self)
    # 0xD1 always carries an explicit length subfield; read it before delegating.
    (length, _), _ = yield ctx.immediate_transition(
        _var_uint_field_handler(self_handler, ctx)
    )
    if length < 2:
        # A valid field name/value pair is at least two octets: one for the field name SID and one for the value.
        raise IonException('Ordered structs (type ID 0xD1) must have at least one field name/value pair.')
    yield ctx.immediate_transition(handler(length, ctx))
@coroutine
def _container_start_handler(ion_type, length, ctx):
    """Handles container delegation: emits CONTAINER_START and hands control
    to a fresh ``_container_handler`` for the container's body."""
    _, self = yield
    container_ctx = ctx.derive_container_context(length)
    if ctx.annotations and ctx.limit != container_ctx.limit:
        # 'ctx' is the annotation wrapper context. `container_ctx` represents the wrapper's 'value' subfield. Their
        # limits must match.
        raise IonException('Incorrect annotation wrapper length.')
    delegate = _container_handler(ion_type, container_ctx)
    # We start the container, and transition to the new container processor.
    yield ctx.event_transition(
        IonEvent, IonEventType.CONTAINER_START, ion_type, value=None, whence=delegate
    )
@coroutine
def _container_handler(ion_type, ctx):
    """Handler for the body of a container (or the top-level stream).

    Args:
        ion_type (Optional[IonType]): The type of the container or ``None`` for the top-level.
        ctx (_HandlerContext): The context for the container.
    """
    transition = None
    first = True
    at_top = ctx.depth == 0
    while True:
        data_event, self = (yield transition)
        if data_event is not None and data_event.type is ReadEventType.SKIP:
            # The caller asked to skip the remainder of this container's contents.
            yield ctx.read_data_transition(ctx.remaining, self, skip=True)
        if ctx.queue.position == ctx.limit:
            # We are at the end of the container.
            # Yield the close event and go to enclosing container.
            yield Transition(
                IonEvent(IonEventType.CONTAINER_END, ion_type, depth=ctx.depth-1),
                ctx.whence
            )
        if ion_type is IonType.STRUCT:
            # Read the field name.
            self_handler = _create_delegate_handler(self)
            (field_sid, _), _ = yield ctx.immediate_transition(
                _var_uint_field_handler(self_handler, ctx)
            )
            field_name = SymbolToken(None, field_sid)
        else:
            field_name = None
        # Only the very first top-level value must be an IVM (enforced by
        # _start_type_handler when expects_ivm is True).
        expects_ivm = first and at_top
        transition = ctx.immediate_transition(
            _start_type_handler(field_name, self, ctx, expects_ivm, at_top=at_top)
        )
        first = False
#
# Scalar Factories
#
def _rslice(data, rem, size):
start = -rem
end = start + size
if end >= 0:
end = None
return data[slice(start, end)]
def _int_factory(sign, data):
def parse_int():
value = 0
length = len(data)
while length >= 8:
segment = _rslice(data, length, 8)
value <<= 64
value |= unpack('>Q', segment)[0]
length -= 8
if length >= 4:
segment = _rslice(data, length, 4)
value <<= 32
value |= unpack('>I', segment)[0]
length -= 4
if length >= 2:
segment = _rslice(data, length, 2)
value <<= 16
value |= unpack('>H', segment)[0]
length -= 2
if length == 1:
value <<= 8
value |= six.indexbytes(data, -length)
return sign * value
return parse_int
def _float_factory(data):
    """Return a thunk that unpacks ``data`` as an IEEE-754 float.

    The struct format is chosen by data length via ``_FLOAT_LN_TABLE``;
    unsupported lengths raise ``ValueError`` immediately (not in the thunk).
    """
    width = len(data)
    fmt = _FLOAT_LN_TABLE.get(width)
    if fmt is None:
        raise ValueError('Invalid data length for float: %d' % width)
    def parse_float():
        return unpack(fmt, data)[0]
    return parse_float
def _decimal_factory(data):
    """Return a thunk that parses ``data`` as an Ion decimal."""
    # Deferred: the BytesIO wrapper and parse happen only when the thunk runs.
    return lambda: _parse_decimal(BytesIO(data))
def _timestamp_factory(data):
    """Return a thunk that parses ``data`` as an Ion timestamp.

    Fields are read in order: UTC offset, year, month, day, hour/minute,
    second, fractional seconds.  Parsing stops at the end of ``data``, and
    ``precision`` reflects the last field actually present.
    """
    def parse_timestamp():
        end = len(data)
        buf = BytesIO(data)
        precision = TimestampPrecision.YEAR
        # The offset is a signed VarInt in minutes.
        off_sign, off_value = _parse_var_int_components(buf, signed=True)
        off_value *= off_sign
        if off_sign == -1 and off_value == 0:
            # -00:00 (unknown UTC offset) is a naive datetime.
            tz = None
        else:
            tz = OffsetTZInfo(timedelta(minutes=off_value))
        # Year is always present; each following field is optional, and its
        # absence fixes the precision while defaulting the component.
        year = _parse_var_int(buf, signed=False)
        if buf.tell() == end:
            month = 1
        else:
            month = _parse_var_int(buf, signed=False)
            precision = TimestampPrecision.MONTH
        if buf.tell() == end:
            day = 1
        else:
            day = _parse_var_int(buf, signed=False)
            precision = TimestampPrecision.DAY
        if buf.tell() == end:
            hour = 0
            minute = 0
        else:
            # Hour and minute always appear together.
            hour = _parse_var_int(buf, signed=False)
            minute = _parse_var_int(buf, signed=False)
            precision = TimestampPrecision.MINUTE
        if buf.tell() == end:
            second = 0
        else:
            second = _parse_var_int(buf, signed=False)
            precision = TimestampPrecision.SECOND
        if buf.tell() == end:
            fraction = None
        else:
            fraction = _parse_decimal(buf)
            fraction_exponent = fraction.as_tuple().exponent
            if fraction == 0 and fraction_exponent > -1:
                # According to the spec, fractions with coefficients of zero and exponents >= zero are ignored.
                fraction = None
        return Timestamp.adjust_from_utc_fields(
            year, month, day,
            hour, minute, second, None,
            tz,
            precision=precision, fractional_precision=None, fractional_seconds=fraction
        )
    return parse_timestamp
def _symbol_factory(data):
    """Return a thunk producing a SymbolToken whose SID is ``data`` parsed as a UInt."""
    sid_thunk = _int_factory(1, data)
    # The token's text is unknown at parse time; only the SID is carried.
    return lambda: SymbolToken(None, sid_thunk())
def _string_factory(data):
return lambda: data.decode('utf-8')
def _lob_factory(data):
    # Lobs are a trivial return of the byte data.
    # NOTE: unlike the other factories this returns the value itself rather
    # than a thunk; _length_scalar_handler treats non-callable results as
    # already-materialized scalars.
    return data
#
# Binding Functions
#
# Handler table for type octet to handler co-routine.
# 256-entry list mapping each possible type octet to a handler factory; the
# _bind_* functions below populate it (every octet is first seeded with
# _invalid_handler by _bind_invalid_handlers).
_HANDLER_DISPATCH_TABLE = [None] * 256
def _bind_invalid_handlers():
    """Seeds the co-routine table with all invalid handlers."""
    for octet in range(0x100):
        # Bind the octet so the eventual error message can report it.
        _HANDLER_DISPATCH_TABLE[octet] = partial(_invalid_handler, octet)
def _bind_null_handlers():
    """Binds the static null-value handler for each nullable type ID."""
    for tid in _NULLABLE_TIDS:
        octet = _gen_type_octet(tid, _NULL_LN)
        null_handler = partial(_static_scalar_handler, _TID_VALUE_TYPE_TABLE[tid], None)
        _HANDLER_DISPATCH_TABLE[octet] = null_handler
def _bind_static_scalar_handlers():
    """Binds handlers for octets whose scalar value is implied by the octet itself."""
    for entry in _STATIC_SCALARS:
        octet, ion_type, value = entry
        _HANDLER_DISPATCH_TABLE[octet] = partial(_static_scalar_handler, ion_type, value)
def _bind_length_handlers(tids, user_handler, lns):
"""Binds a set of handlers with the given factory.
Args:
tids (Sequence[int]): The Type IDs to bind to.
user_handler (Callable): A function that takes as its parameters
:class:`IonType`, ``length``, and the ``ctx`` context
returning a co-routine.
lns (Sequence[int]): The low-nibble lengths to bind to.
"""
for tid in tids:
for ln in lns:
type_octet = _gen_type_octet(tid, ln)
ion_type = _TID_VALUE_TYPE_TABLE[tid]
if ln == 1 and ion_type is IonType.STRUCT:
handler = partial(_ordered_struct_start_handler, partial(user_handler, ion_type))
elif ln < _LENGTH_FIELD_FOLLOWS:
# Directly partially bind length.
handler = partial(user_handler, ion_type, ln)
else:
# Delegate to length field parsing first.
handler = partial(_var_uint_field_handler, partial(user_handler, | |
# <gh_stars>10-100  (dataset artifact; commented out to keep the file parseable)
import copy
import json
import time
from json import JSONDecodeError
from typing import List, Optional

import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_numeric_dtype
from typeguard import typechecked

from .Report import Report
def validate_attributes(dataframes: List[pd.DataFrame],
                        dataframes_names: List[str],
                        numerical_columns: List[str],
                        categorical_columns: List[str],
                        date_columns: List[str]):
    """Validate the inputs used to build a dashboard.

    Raises AttributeError when the dataframes, names and column lists are
    inconsistent, and TypeError when a declared date column cannot be parsed.
    """
    if not dataframes:
        raise AttributeError('You need to pass at least one pandas dataframe to create a dashboard.')
    if len(dataframes) != len(dataframes_names):
        raise AttributeError('You need to have a dataframe name for each dataframe you have in dataframes')
    if not numerical_columns and not categorical_columns:
        raise AttributeError(
            'You need to pass categorical_columns and/or numerical_columns in order to create a dashboard.')
    for df, df_name in zip(dataframes, dataframes_names):
        available = set(df.columns.tolist())
        for col in numerical_columns:
            if col not in available:
                raise AttributeError(f'Numerical column: {col} is not found in {df_name} dataframe.')
        for col in categorical_columns:
            if col not in available:
                raise AttributeError(f'Categorical column: {col} is not found in {df_name} dataframe.')
        for col in date_columns or []:
            if col not in available:
                raise AttributeError(f'Date column: {col} is not found in {df_name} dataframe.')
            if not is_datetime(pd.to_datetime(df[col], infer_datetime_format=True, errors='ignore')):
                raise TypeError(
                    f'''Date column: {col} has one or more rows which are not a valid date format in {df_name} dataframe.
                             You can replace invalid values with None''')
def validate_bin_numerical_feature_attributes(dataframes: List[pd.DataFrame],
                                              dataframes_names: List[str],
                                              numerical_feature_name: str,
                                              new_feature_name: str):
    """Validate the arguments of ``bin_numerical_feature``.

    The feature must exist in every dataframe and be numeric, and the new
    feature name must be a non-empty string.
    """
    for df, df_name in zip(dataframes, dataframes_names):
        if numerical_feature_name not in df.columns.tolist():
            raise AttributeError(f'Numerical column: {numerical_feature_name} is not found in {df_name} dataframe.')
        if not is_numeric_dtype(df[numerical_feature_name]):
            raise TypeError('''the provided numerical_feature_name is not valid.
             Please make sure that you are passing a numerical feature name''')
    if not new_feature_name:
        raise AttributeError('''the provided new_feature_name is not valid.
         Please make sure that you are passing new_feature_name as a string with at least one character''')
def load_interactive_dashboard(dashboard_path: str) -> Report:
    """
    Load existing dashboard given the dashboard path
    :param dashboard_path: file system path
    :return: Interactive dashboard
    :raises NotADirectoryError: if dashboard_path does not exist
    :raises FileNotFoundError: if report_data.json is missing in the folder
    :raises ValueError: if report_data.json is not parseable (e.g. encrypted)
    """
    import os
    # Guard clauses replace the previous deeply-nested if/else pyramid.
    if not os.path.exists(path=dashboard_path):
        raise NotADirectoryError('provided dashboard_path is not valid. dashboard_path does not exist')
    # Normalize away a trailing separator so os.path.split behaves as expected.
    if dashboard_path[-1] in ('/', '\\'):
        dashboard_path = dashboard_path[:-1]
    report_file_path = f'{dashboard_path}/report_data.json'
    if not os.path.exists(path=report_file_path):
        raise FileNotFoundError(f'report_data.json was not found in {dashboard_path}')
    output_directory, dashboard_folder_name = os.path.split(dashboard_path)
    with open(report_file_path) as report_data_file:
        # Keep the try block minimal: only json.load can raise JSONDecodeError,
        # which here means the payload was encrypted at save time.
        try:
            report_data = json.load(report_data_file)
        except JSONDecodeError:
            raise ValueError('The provided dashboard JSON file has been encrypted and can not be parsed.')
    title = report_data['title']
    dataframes_names = report_data['datasets']
    numerical_columns = report_data['numericalColumns'].copy()
    # 'generated_id' is an internal helper column; create_dashboard re-adds it.
    if 'generated_id' in numerical_columns:
        numerical_columns.remove('generated_id')
    categorical_columns = report_data['categoricalColumns']
    date_columns = report_data['dateColumns']
    dataframes = [pd.read_json(json.dumps(report_data[name])) for name in dataframes_names]
    dashboard = InteractiveDashboard(title=title,
                                     output_directory=output_directory,
                                     dataframes=dataframes,
                                     dataframes_names=dataframes_names,
                                     numerical_columns=numerical_columns,
                                     categorical_columns=categorical_columns,
                                     date_columns=date_columns,
                                     dashboard_folder_name=dashboard_folder_name)
    dashboard.number_displays = report_data['numberDisplays']
    dashboard.charts = report_data['charts']
    dashboard.report_data = report_data
    return dashboard
@typechecked
class InteractiveDashboard(Report):
"""
InteractiveDashboard creates an interactive dashboard that can be used for EDA or error analysis.
Attributes
----------
title : str
the title of the report
output_directory : str
the directory where the dashboard folder will be created
dataframes : List[pd.DataFrame]
a list dataframes to be used in the dashboard
dataframes_names : List[str]
a list of the dataframes names
numerical_columns : List[str] default=None
a list of the numerical columns to be included in the dashboard
categorical_columns : List[str] default=None
a list of the categorical columns to be included in the dashboard
date_columns : List[str] default=None
a list of the date columns to be included in the dashboard
dashboard_folder_name : str default=None
the name of the folder that will contain all the generated report files.
If not set, the title of the report will be used.
encryption_secret : str default=None
the 16 characters secret that will be used to encrypt the generated report data.
If it is not set, the generated data won't be encrypted.
generate_encryption_secret : bool default=False
the encryption_secret will be generated and its value returned as output.
you can also view encryption_secret to get the generated secret.
Methods
-------
create_dashboard()
creates the dashboard
serve_dashboard_from_local_server()
serves the dashboard using a flask server
save_dashboard()
saves the dashboard to be used without a flask server.
"""
def __init__(self,
title: str,
output_directory: str,
dataframes: List[pd.DataFrame],
dataframes_names: List[str],
numerical_columns: List[str] = [],
categorical_columns: List[str] = [],
date_columns: List[str] = None,
dashboard_folder_name: str = None,
encryption_secret: str = None,
generate_encryption_secret: bool = False):
super().__init__(title,
output_directory,
'',
dashboard_folder_name,
encryption_secret,
generate_encryption_secret)
validate_attributes(dataframes,
dataframes_names,
numerical_columns,
categorical_columns,
date_columns)
self.dataframes = [df.copy() for df in dataframes]
self.dataframes_names = dataframes_names[:]
self.numerical_columns = numerical_columns[:]
self.categorical_columns = categorical_columns[:]
self.date_columns = date_columns[:] if date_columns is not None else []
self.number_displays: List[dict] = []
self.charts: List[dict] = []
self._template_name = 'interactive-dashboard'
self._generated_id_column = 'generated_id'
    def create_dashboard(self, auto_generate_distribution_plots: bool = False) -> None:
        """
        Creates a dashboard using the user defined data.

        Populates ``self.report_data`` with the datasets (serialized as JSON
        records), the column metadata, and the configured charts/number displays.
        :param auto_generate_distribution_plots: generate distribution plots and add them to the dashboard. default: False
        """
        # delete default report location created by parent class
        if 'report' in self.report_data:
            del self.report_data['report']
        tic = time.perf_counter()
        # Normalize every dataframe in place: add the synthetic id column,
        # parse date columns, and force categorical columns to strings.
        for df in self.dataframes:
            df[self._generated_id_column] = df.index
            for date_column in self.date_columns:
                df[date_column] = pd.to_datetime(df[date_column], infer_datetime_format=True)
            for col in self.categorical_columns:
                df[col] = df[col].astype(str)
        # NOTE(review): calling this method twice prepends the id column again;
        # presumably the method is intended to be called once per instance.
        self.numerical_columns = [self._generated_id_column] + self.numerical_columns
        self.report_data['datasets'] = self.dataframes_names
        self.report_data['numericalColumns'] = self.numerical_columns if self.numerical_columns else []
        self.report_data['categoricalColumns'] = self.categorical_columns if self.categorical_columns else []
        self.report_data['dateColumns'] = self.date_columns if self.date_columns else []
        for df, df_name in zip(self.dataframes, self.dataframes_names):
            self.report_data[df_name] = json.loads(df.to_json(orient='records'))
        if auto_generate_distribution_plots:
            self.number_displays = self.number_displays + self._generate_number_displays()
            self.charts = self.charts + self._generate_charts()
        self.report_data['numberDisplays'] = self.number_displays
        self.report_data['charts'] = self.charts
        toc = time.perf_counter()
        print(f"The dashboard was created in {toc - tic:0.4f} seconds")
        if self.encryption_secret:
            print(f'Your encryption secret is {self.encryption_secret}')
def get_charts(self) -> List[dict]:
"""
Get a copy of the dashboard's charts
:return: List[dict] the charts
"""
return copy.deepcopy(self.charts)
def get_number_displays(self) -> List[dict]:
"""
Get a copy of the dashboard's number displays
:return: List[dict] the number displays
"""
return copy.deepcopy(self.number_displays)
def update_charts(self, new_charts: List[dict], keep_existing=True) -> None:
"""
Update the dashboard charts.
If keep_existing is True, the dashboard's charts will be extended otherwise it will be replaced.
:param new_charts: List of dict representing the new charts
:param keep_existing: boolean to flag whether existing charts should be extended.
:return: None
"""
if keep_existing:
self.charts.extend(copy.deepcopy(new_charts))
else:
self.charts = copy.deepcopy(new_charts)
def update_number_displays(self, new_number_displays: List[dict], keep_existing=True) -> None:
"""
Update the dashboard number displays.
If keep_existing is True, the dashboard's number displays will be extended otherwise it will be replaced.
:param new_number_displays: List of dict representing the new number displays
:param keep_existing: boolean to flag whether existing charts should be extended.
:return: None
"""
if keep_existing:
self.number_displays.extend(copy.deepcopy(new_number_displays))
else:
self.number_displays = copy.deepcopy(new_number_displays)
    @typechecked
    def bin_numerical_feature(self, numerical_feature_name: str, new_feature_name: str, number_of_bins: int,
                              suffix: str = None) -> None:
        """
        Bin a numerical feature into interval categories. The bin edges are
        computed from the first dataframe and then applied to the remaining
        dataframes so that all dataframes share the same bins.
        :param numerical_feature_name: the numerical feature to bin
        :param new_feature_name: the name of the new binned feature
        :param number_of_bins: the number of bins to apply
        :param suffix: optional suffix appended to each bin label
        :return: None
        """
        validate_bin_numerical_feature_attributes(self.dataframes,
                                                  self.dataframes_names,
                                                  numerical_feature_name,
                                                  new_feature_name)
        first_df = self.dataframes[0]
        # retbins captures the computed edges for reuse; include_lowest makes
        # the minimum value fall into the first interval.
        first_df.loc[:, new_feature_name], bins = pd.cut(first_df.loc[:, numerical_feature_name],
                                                         retbins=True, include_lowest=True,
                                                         bins=number_of_bins)
        self.categorical_columns.append(new_feature_name)
        if len(self.dataframes) > 1:
            for df in self.dataframes[1:]:
                # Values outside the first dataframe's range end up as NaN here.
                df.loc[:, new_feature_name] = pd.cut(df.loc[:, numerical_feature_name], bins=bins)
        if suffix is not None:
            for df in self.dataframes:
                df.loc[:, new_feature_name] = df.loc[:, new_feature_name].astype(str) + '_' + suffix
def _generate_number_displays(self) -> List[dict]:
"""
generate number displays for the auto generate functionality
:return: List[dict] the generated number displays
"""
return [
{
'agg': 'count',
'column': self._generated_id_column,
'title': 'Number of observations',
'type': 'number-display',
'w': 12,
'h': 1,
'maxH': 2,
'i': 0,
'id': 'number_of_observations_number_display',
'x': 0,
'y': 0,
'static': False
}
]
def _generate_charts(self) -> List[dict]:
"""
generate histograms for the auto generate functionality
:return: List[dict] the generated charts
"""
charts = []
x = 0
y = 0
w = 4
h = 2
i = 0
max_width = 12
df = self.dataframes[0]
for col in self.categorical_columns:
n_unique = df[col].nunique()
if n_unique > 2:
charts.append({
'dimension': col,
'agg': 'count',
'column': self._generated_id_column,
'title': f'{col} count',
'type': 'row-chart',
'cap': 10,
'w': w,
'h': h,
'minW': 2,
'minH': 2,
'i': | |
theme, subtheme, topic, period, difficulty, l_id):
# Only show intro once per login
if context.c.session.get("showed_test_intro"):
page.page_params.set_param("theme", theme)
page.page_params.set_param("subtheme", subtheme)
page.page_params.set_param("topic", topic)
page.page_params.set_param("period", period)
page.page_params.set_param("difficulty", difficulty)
page.page_params.set_param("l_id", l_id)
test = List(page)
url_next, url_skip = test.get_next_question_test_url(Design_default.total_questions)
else:
url_next = page.page_params.create_url(
op = PageOperation.TEST_INTRO,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
theme = theme,
subtheme = subtheme,
topic = topic,
period = period,
difficulty = difficulty,
beta = True if page.page_params.get_param("beta") else None,
l_id = l_id)
return url_next
@staticmethod
def _next_theme_browse_url(page, theme, subtheme, topic, period, difficulty, l_id):
    """
    Return the next URL for the browse flow.

    If the browse intro was already shown this login, persist the selection
    into the page parameters and jump straight to the next question;
    otherwise build a URL routing through the one-time BROWSE_INTRO page.
    """
    params = page.page_params
    # Only show intro once per login
    if context.c.session.get("showed_browse_intro"):
        for name, value in (("theme", theme), ("subtheme", subtheme),
                            ("topic", topic), ("period", period),
                            ("difficulty", difficulty), ("l_id", l_id)):
            params.set_param(name, value)
        _, url_next = List(page).get_prev_next_questions_browse_url()
    else:
        url_next = params.create_url(
            op=PageOperation.BROWSE_INTRO,
            language=PageLanguage.toStr(params.get_param("language")),
            theme=theme,
            subtheme=subtheme,
            topic=topic,
            period=period,
            difficulty=difficulty,
            beta=True if params.get_param("beta") else None,
            l_id=l_id)
    return url_next
# BROWSE
@staticmethod
def _next_theme_url(page, theme, subtheme, topic, period, difficulty, l_id):
    # Thin alias: theme navigation currently routes through the browse flow.
    return Design_default._next_theme_browse_url(page, theme, subtheme, topic, period, difficulty, l_id)
@staticmethod
@timer_section("render_select_theme_page")
def render_select_theme_page(page):
    """
    Render the theme-selection page for the currently selected year.

    Builds a themes -> subthemes -> topics tree (ordered by the ranks stored
    in the content repository) into page.template_params for theme.html.j2,
    or renders the error template when the year has no content.
    """
    # Reset navigation state: a fresh selection starts here.
    page.page_params.delete_history()
    page.page_params.set_param("theme", "")
    page.page_params.set_param("subtheme", "")
    page.page_params.set_param("topic", "")
    page.page_params.set_param("q_id", "")
    page.page_params.set_param("l_id", "")
    # Create dictionary entries that define menu
    Design_default.add_menu(page)
    lang = page.page_params.get_param("language")
    content = page.repository.get_content(PageLanguage.toStr(lang))
    # Counter used to rename SVG class ids so icons don't clash on one page.
    icon_cnt = 0
    if content and page.page_params.get_param("year") in content.keys():
        page.template_params["template_name"] = Design_default._add_language(page, "theme.html.j2")
        page.template_params['year'] = page.page_params.get_param("year").upper().strip()
        try:
            int_year = int(page.page_params.get_param("year"))
        except:
            # Non-numeric year label: fall back to the first color index.
            int_year = 1
            pass
        page.template_params['year_color'] = Design_default._get_color(int_year)
        page.template_params['url_year'] = page.page_params.create_url(
            language = PageLanguage.toStr(page.page_params.get_param("language")), \
            op = PageOperation.MENU_YEAR,
            beta = True if page.page_params.get_param("beta") else None)
        page.template_params['themes'] = []
        page.template_params["h1"] = page.template_params['year']
        page.template_params["h2"] = "Izaberi temu"
        # page.add_lines("<div style='width: auto ;margin-left: auto ;margin-right: auto ;'>\n")
        # page.add_lines("<h3> {} razred - izaberi oblast</h3>\n".format(page.page_params.get_param("year").title()))
        # page.add_lines("</div>\n")
        # Sort according to the assigned rank
        sorted_themes = []
        for theme in sorted(content[page.page_params.get_param("year")].keys()):
            # "level_short" is a metadata key, not a theme.
            if not theme == "level_short":
                sorted_themes.append([theme, content[page.page_params.get_param("year")][theme]["rank"]])
        sorted_themes.sort(key=lambda x:x[1])
        sorted_themes = [x[0] for x in sorted_themes]
        #for theme in sorted(content[page.page_params.get_param("year")].keys()):
        for theme in sorted_themes:
            subtheme_list = []
            subtheme_dict = dict()
            # Special provisioning for Serbian cyrillic
            if lang == PageLanguage.RSC:
                theme_o = Transliterate.rs(theme)
            else:
                theme_o = theme
            for subclass in sorted(content[page.page_params.get_param("year")][theme].keys()):
                # Skip theme-level metadata entries.
                if not subclass == "name" and not subclass == "rank":
                    subtheme = content[page.page_params.get_param("year")][theme][subclass]["subtheme"].strip()
                    topic = content[page.page_params.get_param("year")][theme][subclass]["topic"].strip()
                    # Special provisioning for Serbian cyrillic
                    if lang == PageLanguage.RSC:
                        subtheme_o = Transliterate.rs(subtheme)
                        topic_o = Transliterate.rs(topic)
                    else:
                        subtheme_o = subtheme
                        topic_o = topic
                    rank_subtheme = content[page.page_params.get_param("year")][theme][subclass]["rank_subtheme"].strip()
                    rank_topic = content[page.page_params.get_param("year")][theme][subclass]["rank_topic"].strip()
                    period = content[page.page_params.get_param("year")][theme][subclass]["period"]
                    if subtheme not in subtheme_dict.keys():
                        # First time we see this subtheme: build its entry.
                        icon_svg = page.repository.get_icon_svg(PageLanguage.toStr(lang), subtheme)
                        # Different SVGs can have same path IDs (e.g. created in the same program)
                        # So we change names here
                        #icon_svg = re.sub(r'id="(.*?)"', 'id="\\1_R_{}"'.format(icon_cnt), icon_svg)
                        icon_svg = re.sub(r'cls-(.)', 'cld-\\1_R_{}'.format(icon_cnt), icon_svg)
                        icon_cnt = icon_cnt + 1
                        subtheme_d = {
                            'title' : subtheme_o.capitalize(),
                            'icon' : icon_svg,
                            'rank_subtheme' : rank_subtheme,
                            'topics' : [],
                            'topics_dir' : {},
                            'min_period' : period,
                            'link' : Design_default._next_theme_url(
                                page = page,
                                theme = theme.title().strip(),
                                subtheme = subtheme,
                                topic = "*",
                                period = "*",
                                difficulty = "*",
                                l_id = content[page.page_params.get_param("year")][theme]["name"])
                        }
                        subtheme_dict[subtheme] = subtheme_d
                        subtheme_list.append(subtheme_d)
                        # BROWSE
                        link = Design_default._next_theme_test_url(
                            page = page,
                            theme = theme.title().strip(),
                            subtheme = subtheme,
                            topic = "*",
                            period = "*",
                            difficulty = "*",
                            l_id = content[page.page_params.get_param("year")][theme]["name"])
                        topic_d = {
                            # Special provisioning for Serbian cyrillic
                            'title' : "Sve teme" if not lang == PageLanguage.RSC else "Све теме",
                            'rank_topic' : "0",
                            'min_period' : "0",
                            'link' : link
                        }
                        # BROWSE
                        # Pseudo-topic that starts a test over the whole subtheme;
                        # rank 9999 keeps it last in the sorted topic list.
                        topic_d['rank_topic'] = "9999"
                        topic_d['title'] = page.get_messages()['test']
                        topic_d['color'] = Design_default._get_color(int_year)
                        topic_d['font-weight'] = 'bolder'
                        topic_d['font-size'] = '12px'
                        subtheme_d['topics_dir']["all"] = topic_d
                        subtheme_d['topics'].append(topic_d)
                    else:
                        subtheme_d = subtheme_dict[subtheme]
                    if topic not in subtheme_d['topics_dir'].keys():
                        topic_d = {
                            'title' : topic_o.capitalize(),
                            'rank_topic' : rank_topic,
                            'min_period' : period,
                            'link' : Design_default._next_theme_url(
                                page = page,
                                theme = theme.title().strip(),
                                subtheme = subtheme,
                                topic = topic,
                                period = "*",
                                difficulty = "*",
                                l_id = content[page.page_params.get_param("year")][theme]["name"])
                        }
                        subtheme_d['topics_dir'][topic] = topic_d
                        subtheme_d['topics'].append(topic_d)
                    else:
                        topic_d = subtheme_d['topics_dir'][topic]
                    # Track the earliest period seen for both subtheme and topic.
                    subtheme_d['min_period'] = period if period < subtheme_d['min_period'] else subtheme_d['min_period']
                    topic_d['min_period'] = period if period < topic_d['min_period'] else topic_d['min_period']
            # Sort first by period and then alphabetically
            #subtheme_list.sort(key=lambda x:x['min_period'] + x['title'])
            subtheme_list.sort(key=lambda x:x['rank_subtheme'] + x['title'])
            #logging.debug("THEME {}: \n{}\n\n".format(
            #    theme, [[x['title'], x['min_period']] for x in subtheme_list] ))
            for st in subtheme_list:
                #st['topics'].sort(key=lambda x:x['min_period'] + x['title'])
                st['topics'].sort(key=lambda x:x['rank_topic'] + x['title'])
                #logging.debug("SUBTHEME {}: \n{}\n\n".format(
                #    st['title'], [[x['title'], x['min_period']] for x in st['topics']] ))
            page.template_params['themes'].append({
                'title' : theme_o.capitalize().strip(),
                'link' : Design_default._next_theme_url(
                    page = page,
                    theme = theme.title().strip(), \
                    subtheme = "*", \
                    topic = "*", \
                    period = "*", \
                    difficulty = "*", \
                    l_id = content[page.page_params.get_param("year")][theme]["name"]),
                'subthemes' : subtheme_list
            })
    else:
        # No usable content for this year: render the error template.
        page.template_params["template_name"] = Design_default._add_language(page, "error.html.j2")
        if not page.page_params.get_param("year") in content.keys():
            page.template_params["error_msg"] = "No year {} in content".format(page.page_params.get_param("year"))
        else:
            page.template_params["error_msg"] = "No content"
@staticmethod
@timer_section("render_confirm_anon_page")
def render_confirm_anon_page(page):
    """
    Render the page asking an anonymous user to confirm continuing.

    "next" proceeds to the year menu; "back" logs the user out.
    """
    params = page.page_params
    page.template_params["template_name"] = Design_default._add_language(page, "confirm_anon.html.j2")
    language = PageLanguage.toStr(params.get_param("language"))
    beta = True if params.get_param("beta") else None
    page.template_params["next"] = params.create_url(
        op=PageOperation.MENU_YEAR, language=language, beta=beta)
    page.template_params["back"] = params.create_url(
        op=PageOperation.LOGOUT, language=language, beta=beta)
@staticmethod
@timer_section("render_select_get_test_started_page")
def render_select_get_test_started_page(page):
    """
    Render the test intro page and precompute the next/skip/back URLs.
    """
    page.page_params.delete_history()
    page.page_params.set_param("q_id", "")
    # Create dictionary entries that define menu
    Design_default.add_menu(page)
    tp = page.template_params
    tp["template_name"] = Design_default._add_language(page, "test_intro.html.j2")
    # Title-cased selection summary shown on the intro screen.
    for key in ("year", "theme", "subtheme", "topic"):
        tp[key] = page.page_params.get_param(key).title()
    tp["period"] = context.c.session.get("period").title()
    tp["difficulty"] = context.c.session.get("difficulty").title()
    tp["h1"] = tp['year']
    tp["h2"] = tp["theme"]
    tp["h3"] = tp["topic"]
    tp["h4"] = "Start test"
    tp["beta"] = bool(context.c.session.get("beta"))
    url_next, url_skip = List(page).get_next_question_test_url(Design_default.total_questions)
    tp["next"] = url_next
    tp["skip"] = url_skip
    tp["back"] = page.page_params.create_url(
        op=PageOperation.MENU_THEME,
        language=PageLanguage.toStr(page.page_params.get_param("language")),
        subtheme="",
        topic="",
        period="",
        difficulty="",
        l_id="",
        beta=True if page.page_params.get_param("beta") else None)
@staticmethod
def _render_result_bar_and_get_last_difficulty(page):
    """
    Fill the total_bar/type_bar counters in page.template_params from the
    session history and return the difficulty ("1"/"2"/"3") of the most
    recent answered question, or "0" when none is found.
    """
    difficulty = "0"
    page.template_params["total_bar"] = {"star1": 0, "star2": 0, "star3": 0, "missed": 0}
    page.template_params["type_bar"] = {"star1": 0, "star2": 0, "star3": 0, "missed": 0}
    # Count only the best answer to each question
    best_by_question = {}
    history = context.c.session.get("history")
    if history:
        for entry in history:
            if entry.get("difficulty") in ("1", "2", "3"):
                difficulty = entry["difficulty"]
            stored = best_by_question.get(entry["q_id"])
            if stored is None:
                best_by_question[entry["q_id"]] = entry
            elif entry["incorrect"] < stored["incorrect"]:
                stored["incorrect"] = entry["incorrect"]
                stored["correct"] = entry["correct"]
    # A question is correct if all subquestions are correct
    type_bar = page.template_params["type_bar"]
    total_bar = page.template_params["total_bar"]
    for record in best_by_question.values():
        level = record["difficulty"]
        if level in ("1", "2", "3"):
            star = "star" + level
            if record["incorrect"] == 0:
                type_bar[star] = type_bar[star] + 1
            else:
                type_bar["missed"] = type_bar["missed"] + 1
            total_bar[star] = total_bar[star] + record["correct"]
            total_bar["missed"] = total_bar["missed"] + record["incorrect"]
    return difficulty
@staticmethod
def _store_last_question_into_session(page):
    """
    Copy the current question's selection context into the session.

    Used when registering results in order to get good statistics of
    years/topics/themes/etc.
    """
    for field in ("year", "theme", "subtheme", "topic"):
        context.c.session.set("last_q_" + field, page.page_params.get_param(field))
@staticmethod
@timer_section("render_test_page")
def render_test_page(page):
test = List(page)
Design_default._store_last_question_into_session(page)
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "test.html.j2")
q_id = page.page_params.get_param("q_id")
q_number = context.c.session.get("q_num")
try:
q_number = int(q_number) if q_number else 0
except ValueError as ex:
logging.error("Incorrect q_num={}\n{}".format(q_number, helpers.get_stack_trace()))
q_number = 0
skipped = page.page_params.get_param("skipped")
if skipped and isinstance(skipped, str) and skipped.lower() == "true":
try:
context.c.session.get("history")[-1]["skipped"] = True
except:
# I believe this happens when a link with "skipped" parameteris bookmarked,
# so we don't want alerts on this one.
logging.debug("Cannot mark last question as skipped\nhist={}\n{}".format(
context.c.session.get("history"), helpers.get_stack_trace()
))
hist = None
if page.page_params.get_param("op") == PageOperation.TEST_PREV or q_number == test.get_q_number() - 1:
context.c.session.list_delete("history", -1)
# At this point current q_id should match the last one in history,
# otherwise there was an error creating | |
#
# Copyright (c) 2016-2020, Dell Inc. or its subsidiaries.
# All rights reserved.
# See file LICENSE for licensing information.
#
# Module Name:
#
# transport.py
#
# Abstract:
#
# Async event loop, socket handling, and polling mechanisms
#
# Authors: <NAME> (<EMAIL>)
#
from builtins import object
from errno import (
errorcode,
EBADF,
ECONNRESET,
ENOTCONN,
ESHUTDOWN,
ECONNABORTED,
EISCONN,
EINPROGRESS,
EALREADY,
EWOULDBLOCK,
EAGAIN,
)
import select
import socket
import time
# Exceptions that must never be swallowed by the generic event-loop error
# handling; they are always re-raised to the caller.
_reraised_exceptions = (KeyboardInterrupt, SystemExit)
class Transport(object):
    """
    Transport is responsible for managing the underlying socket, registering
    for socket events, dispatching read, write, and errors to higher layers.
    It is analogous to asyncore.dispatcher and is a drop in replacement for
    most purposes.

    If the alternate_poller is specified on instantiation, then the connection
    will register for events on that poller as opposed to the global poller.
    """

    def __init__(self, alternate_poller=None):
        self.addr = None          # remote (host, port), set once connected
        self.connected = False    # True after the connect handshake finishes
        self.socket = None
        self._fileno = None
        if alternate_poller is not None:
            self.poller = alternate_poller
        else:
            self.poller = poller  # global poller

    def create_socket(self, family, type):
        """
        Creates the underlying non-blocking socket and associates it with this
        Transport's underlying poller.

        :param family: socket address family (e.g. socket.AF_INET)
        :param type: socket type (e.g. socket.SOCK_STREAM)
        """
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock):
        """
        Mirror the given socket sock's file descriptor on the Transport and
        register this Transport with the underlying poller.
        """
        self.socket = sock
        self._fileno = sock.fileno()
        self.poller.add_channel(self)

    def connect(self, address):
        """
        Begin establishing a connection to the (host, port) address tuple.
        Must call create_socket first. If the underlying socket is
        non-blocking then this command may return before the connection is
        established; higher level code should wait for the handle_connect
        event to signal that the endpoint is successfully connected.

        :raises socket.error: for immediate, non-retryable connect failures
        """
        self.connected = False
        err = self.socket.connect_ex(address)
        # These statuses mean the attempt is still in flight; the poller
        # will deliver a write event when the handshake completes.
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def close(self):
        """
        Close the underlying socket connection and unregister this Transport
        from the underlying poller.
        """
        self.socket.close()
        self.connected = False
        self.poller.del_channel(self)

    def send(self, data):
        """
        Send data bytes over the connection. If the socket would block,
        schedule this Transport to be notified when the socket is available
        for writing; handle_write will be called in that case.

        :return: the number of bytes sent, or 0 if the write would block
        """
        result = 0
        try:
            result = self.socket.send(data)
        except socket.error as err:
            # Bug fix: EWOULDBLOCK can be a distinct errno from EAGAIN on
            # some platforms; both mean "retry later".
            if err.errno in (EAGAIN, EWOULDBLOCK):
                # reschedule the send when the socket is ready
                self.poller.defer_write(self)
            else:
                # raise non-retryable errors
                raise
        return result

    def recv(self, bufsize):
        """
        Receive up to bufsize bytes over the connection. If the socket would
        block, an empty bytes object is returned; when the socket becomes
        readable, handle_read will be called.

        :return: the bytes received (b"" if the read would block)
        :raises EOFError: when the remote host closed the connection
        """
        # Bug fix: socket.recv returns bytes, so a closed peer is signalled
        # by b"" -- the old comparison against the str "" could never match,
        # and remote hangups went undetected.
        result = b""
        try:
            result = self.socket.recv(bufsize)
            if result == b"":
                raise EOFError("Remote host closed connection")
        except socket.error as err:
            # raise non-retryable errors
            if err.errno not in (EAGAIN, EWOULDBLOCK):
                raise
        return result

    def handle_connect_event(self):
        """
        Called internally when the socket becomes connected.
        """
        self.connected = True
        self.handle_connect()

    def handle_connect(self):
        """
        Callback fired when connection is established.
        """
        pass

    def handle_read(self):
        """
        Callback fired when the socket has data available.
        """
        pass

    def handle_write(self):
        """
        Callback fired when the socket is available for writing.

        Note: unlike asyncore, write notifications are not provided by
        default. This is a performance optimization: the socket is usually
        writable and the application knows when it wants to write, so there
        is no point filling the event queue with write-ready messages that
        would be ignored. Applications should call handle_write directly
        when data is to be sent; if the socket would block, the Transport
        requests a single write notification from the poller and
        handle_write is called once more when the socket may be ready to
        retry. Alternatively, transport.poller.defer_write(transport) may
        be called to schedule a single handle_write callback.
        """
        pass

    def handle_close(self):
        """
        Callback fired when the socket is closed.
        """
        pass

    def handle_error(self):
        """
        Callback fired if a non-recoverable exception is raised.
        """
        pass
class BasePoller(object):
    """
    A poller is an underlying event monitoring system. This generic class
    can be built upon to implement efficient file descriptor polling methods
    which are available on various platforms.

    A minimal subclass must implement the poll() function which performs a
    single iteration of the event loop across all monitored Transports and
    calls process_readables and process_writables with the correct values.

    Subclasses should, in most cases, call into BasePoller methods in order
    to maintain proper accounting structures. The exception is when the
    poller handles accounting itself.
    """

    def __init__(self):
        """
        Initialize the poller and register any kernel global structures
        necessary to monitor the file descriptors.
        """
        # fileno -> Transport for every monitored channel
        self.connections = {}
        # filenos that have requested a single write notification
        self.deferred_writers = set()

    def add_channel(self, transport):
        """
        Begin monitoring the transport socket for read/connect events.
        The underlying poller should not monitor Transports for writability
        except when:
          * the Transport's connection has not yet been established
          * the Transport has been passed as an argument to defer_write
        """
        self.connections[transport._fileno] = transport
        transport.poller = self

    def del_channel(self, transport):
        """
        Stop monitoring the transport socket.
        """
        del self.connections[transport._fileno]

    def defer_write(self, transport):
        """
        Defer a write on the given transport. Once the async poller
        determines that the transport can be written to, handle_write will
        be called.
        """
        self.deferred_writers.add(transport._fileno)

    def loop(self, timeout=None, count=None):
        """
        Enter the async event loop for the given timeout (seconds) or
        number of iterations; run forever when both are None.
        """
        start = time.time()
        complete_iterations = 0
        while True:
            if count is not None and complete_iterations >= count:
                break
            self.poll()
            if timeout is not None and time.time() > start + timeout:
                break
            complete_iterations += 1

    def poll(self):
        """
        Must be implemented by subclasses to execute a single iteration of
        the event loop. Based on the outcome of the events, the following
        actions MUST be performed:
          * process_readables is called with a list of file descriptors
            which have data available for reading
          * process_writables is called with a list of file descriptors
            which have data available for writing
        """
        raise NotImplementedError("BasePoller does not have a polling mechanism")

    def _dispatch(self, transport, action):
        """
        Run action() and route failures to the standard callbacks:
        disconnect-style socket errors -> handle_close, any other socket
        error or unexpected exception -> handle_error. KeyboardInterrupt
        and SystemExit always propagate.

        (Extracted helper: this logic was previously duplicated in
        process_readables and process_writables.)
        """
        try:
            action()
        except socket.error as e:
            if e.args[0] in (
                EBADF,
                ECONNRESET,
                ENOTCONN,
                ESHUTDOWN,
                ECONNABORTED,
            ):
                transport.handle_close()
            else:
                transport.handle_error()
        except _reraised_exceptions:
            raise
        except:
            transport.handle_error()

    def process_readables(self, readables):
        """
        Call handle_read on each applicable fd in the readables sequence,
        with handle_error/handle_close invoked on failure (see _dispatch).
        """
        for fileno in readables:
            transport = self.connections[fileno]
            self._dispatch(transport, transport.handle_read)

    def _deliver_write(self, transport):
        """
        Complete the connect handshake for a not-yet-connected transport,
        or deliver a write notification and clear any deferred-write mark.
        """
        if not transport.connected:
            transport.handle_connect_event()
        else:
            self.deferred_writers.discard(transport._fileno)
            transport.handle_write()

    def process_writables(self, writables):
        """
        For each Transport t corresponding to an fd in the writables
        sequence: if t is not marked as connected, call
        handle_connect_event; otherwise call handle_write and remove the
        Transport from the set of deferred writers. Close and error events
        are processed if an exception is encountered (see _dispatch).
        """
        for fileno in writables:
            transport = self.connections[fileno]
            self._dispatch(transport, lambda t=transport: self._deliver_write(t))
class KQueuePoller(BasePoller):
    """
    Implementation of KQueue, available on Mac OS and BSD derivatives
    """
    def __init__(self):
        super(KQueuePoller, self).__init__()
        self.kq = select.kqueue()
        # Maximum number of kevents retrieved per poll() iteration.
        self.batch_size = 10
    def add_channel(self, transport):
        """Register the transport and arm its kqueue filters."""
        super(KQueuePoller, self).add_channel(transport)
        # The read filter stays armed permanently; the write filter is
        # one-shot so only a single connect/write notification is delivered.
        events = [
            select.kevent(
                transport._fileno,
                filter=select.KQ_FILTER_READ,
                flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            ),
            select.kevent(
                transport._fileno,
                filter=select.KQ_FILTER_WRITE,
                flags=(select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_ONESHOT),
            ),
        ]
        self.kq.control(events, 0)
    def defer_write(self, transport):
        """Re-arm the one-shot write filter for a single notification."""
        super(KQueuePoller, self).defer_write(transport)
        events = [
            select.kevent(
                transport._fileno,
                filter=select.KQ_FILTER_WRITE,
                flags=(select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_ONESHOT),
            )
        ]
        self.kq.control(events, 0)
    def poll(self):
        """Run one non-blocking kqueue sweep and dispatch the events."""
        # timeout 0: never block; fetch at most batch_size pending events.
        events = self.kq.control(None, self.batch_size, 0)
        readables = []
        writables = []
        for ev in events:
            if ev.filter == select.KQ_FILTER_READ:
                readables.append(ev.ident)
            elif ev.filter == select.KQ_FILTER_WRITE:
                writables.append(ev.ident)
        self.process_readables(readables)
        self.process_writables(writables)
class SelectPoller(BasePoller):
"""
Implementation of select, available on most platforms as a fallback.
Roughly equivalent performance to using asyncore
"""
def poll(self):
non_connected = [
t._fileno | |
all goal fields before alignment.'
logger.info(msg)
return
raw_goals.append(goal)
logger.info("Starting %s procedure with goals %s",
self.procedure, raw_goals)
self.install_pick_cam()
self.auto_switch_cam = True
alignment = self.alignments[self.procedure]
for key_set in alignment:
yags = [self.loader[key]['imager'] for key in key_set]
mots = [self.loader[key]['mirror'] for key in key_set]
rots = [self.loader[key].get('rotation')
for key in key_set]
# Make sure nominal positions are correct
for mot in mots:
try:
mot.nominal_position = self.config_cache[mot.name]
except KeyError:
pass
mot_rbv = 'pitch'
# We need to select det_rbv and interpret goals based on
# the camera rotation, converting things to the unrotated
# coordinates.
det_rbv = []
goals = []
for rot, yag, goal in zip(rots, yags, raw_goals):
rot_info = ad_stats_x_axis_rot(yag, rot)
det_rbv.append(rot_info['key'])
modifier = rot_info['mod_x']
if modifier is not None:
goal = modifier - goal
goals.append(goal)
first_steps = self.settings_cache['first_step']
tolerances = self.settings_cache['tolerance']
average = self.settings_cache['averages']
timeout = self.settings_cache['timeout']
tol_scaling = self.settings_cache['tol_scaling']
extra_stage = []
close_fee_att = self.settings_cache['close_fee_att']
if close_fee_att and not self.sim:
extra_stage.append(self.fee_att())
# Temporary fix: undo skywalker's goal mangling.
# TODO remove goal mangling from skywalker.
goals = [480 - g for g in goals]
plan = skywalker(yags, mots, det_rbv, mot_rbv, goals,
first_steps=first_steps,
tolerances=tolerances,
averages=average, timeout=timeout,
sim=self.sim, use_filters=not self.sim,
tol_scaling=tol_scaling,
extra_stage=extra_stage)
self.initialize_RE()
self.RE(plan)
elif self.RE.state == 'paused':
logger.info("Resuming procedure.")
self.install_pick_cam()
self.auto_switch_cam = True
self.RE.resume()
except:
logger.exception('Error in running procedure')
finally:
self.auto_switch_cam = False
@pyqtSlot()
def on_pause_button(self):
    """
    Slot for the pause button. This brings us from the running state to the
    paused state.
    """
    self.auto_switch_cam = False
    if self.RE.state != 'running':
        return
    logger.info("Pausing procedure.")
    try:
        self.RE.request_pause()
    except:
        logger.exception("Error on pause.")
@pyqtSlot()
def on_abort_button(self):
    """
    Slot for the abort button. This brings us from any state to the idle
    state.
    """
    self.auto_switch_cam = False
    if self.RE.state == 'idle':
        return
    logger.info("Aborting procedure.")
    try:
        self.RE.abort()
    except:
        logger.exception("Error on abort.")
@pyqtSlot()
def on_slits_button(self):
    """
    Slot for the slits procedure. This checks the slit fiducialization.

    Runs a slit-scan fiducialization for every checked imager/slit pair and,
    if the fill checkbox is set, writes the measured centroids back into the
    goal fields.
    """
    try:
        logger.info('Starting slit check process.')
        image_to_check = []
        slits_to_check = []
        # First, check the slit checkboxes.
        for img_obj, slit_obj, goal_group in zip(self.imagers_padded(),
                                                 self.slits_padded(),
                                                 self.goals_groups):
            if slit_obj is not None and goal_group.is_checked:
                image_to_check.append(img_obj)
                slits_to_check.append(slit_obj)
        if not slits_to_check:
            logger.info('No valid slits selected!')
            return
        logger.info('Checking the following slits: %s',
                    [slit.name for slit in slits_to_check])
        self.install_pick_cam()
        self.auto_switch_cam = True
        slit_width = self.settings_cache['slit_width']
        samples = self.settings_cache['samples']
        # Plan factory: fiducialize one slit against one imager and store
        # the (rotation-corrected) centroid into output_obj by imager name.
        def plan(img, slit, rot, output_obj, slit_width=slit_width,
                 samples=samples):
            rot_info = ad_stats_x_axis_rot(img, rot)
            det_rbv = rot_info['key']
            fidu = slit_scan_fiducialize(slit, img, centroid=det_rbv,
                                         x_width=slit_width,
                                         samples=samples)
            output = yield from fidu
            modifier = rot_info['mod_x']
            if modifier is not None:
                # Convert back to the unrotated coordinate system.
                output = modifier - output
            output_obj[img.name] = output
        self.initialize_RE()
        results = {}
        for img, slit in zip(image_to_check, slits_to_check):
            systems = self.loader.get_systems_with(img.name)
            objs = self.loader.get_subsystem(systems[0])
            rotation = objs.get('rotation', 0)
            this_plan = plan(img, slit, rotation, results)
            wrapped = run_wrapper(this_plan)
            wrapped = stage_wrapper(wrapped, [img, slit])
            self.RE(wrapped)
        logger.info('Slit scan found the following goals: %s', results)
        if self.ui.slit_fill_check.isChecked():
            logger.info('Filling goal fields automatically.')
            for img, fld in zip(self.imagers_padded(), self.goals_groups):
                if img is not None:
                    try:
                        fld.value = round(results[img.name], 1)
                    except KeyError:
                        # No scan result for this imager; leave field as-is.
                        pass
    except:
        logger.exception('Error on slits button')
    finally:
        self.auto_switch_cam = False
@pyqtSlot()
def on_save_mirrors_button(self):
    """Slot for the save-mirrors button: persist averaged mirror positions."""
    try:
        if self.nominal_config is None:
            logger.info('No config file chosen.')
            return
        logger.info('Saving mirror positions.')
        self.save_active_mirrors()
        self.cache_config()
    except:
        logger.exception('Error on saving mirrors')
@pyqtSlot()
def on_save_goals_button(self):
    """
    Slot for the save-goals button: persist the entered goals to the
    nominal config file and refresh the in-memory config cache.
    """
    try:
        logger.info('Saving goals.')
        self.save_active_goals()
        self.cache_config()
    except:
        # Never let a save failure crash the GUI; just log it.
        logger.exception('Error on saving goals')
@pyqtSlot()
def on_settings_button(self):
    """
    Slot for the settings button: open the settings dialog next to the
    button; persist on accept, roll back the edits on cancel.
    """
    try:
        pos = self.ui.mapToGlobal(self.settings_button.pos())
        result = self.settings.dialog_at(pos)
        if result == QDialog.Accepted:
            self.cache_settings()
            self.save_settings()
            logger.info('Settings saved.')
        elif result == QDialog.Rejected:
            self.restore_settings()
            logger.info('Changes to settings cancelled.')
    except:
        logger.exception('Error on opening settings')
@pyqtSlot(int)
def on_move_nominal_button(self, index):
    """
    Slot for a move-to-nominal button: move the mirror at *index* to the
    position saved in the nominal config file.
    """
    try:
        nominal_positions = self.read_config() or {}
        try:
            mirror = self.mirrors()[index]
        except IndexError:
            logger.exception('Mirror index out of range')
            return
        try:
            pos = nominal_positions[mirror.name]
        except KeyError:
            logger.info('No mirror position saved')
            return
        logger.info('Moving %s to %s', mirror.name, pos)
        mirror.move(pos)
    except Exception:
        logger.exception('Misc error on move nominal button')
def initialize_RE(self):
    """
    Set up the RunEngine suspenders from the currently cached settings.
    """
    self.RE.clear_suspenders()
    min_beam = self.settings_cache['min_beam']
    min_rate = self.settings_cache['min_rate']
    if min_beam is not None:
        beam_suspender = BeamEnergySuspendFloor(min_beam, sleep=5,
                                                averages=100)
        self.RE.install_suspender(beam_suspender)
    if min_rate is not None:
        rate_suspender = BeamRateSuspendFloor(min_rate, sleep=5)
        self.RE.install_suspender(rate_suspender)
def fee_att(self):
    """Return the cached FeeAtt instance, creating it on first use."""
    if not hasattr(self, '_fee_att'):
        self._fee_att = FeeAtt()
    return self._fee_att
def cache_settings(self):
    """
    Snapshot the settings widget values into the local cache.
    """
    self.settings_cache = self.settings.values
def restore_settings(self):
    """
    Push the locally cached settings back into the settings widget.
    """
    self.settings.values = self.settings_cache
def save_settings(self):
    """
    Write settings from the local cache to disk.
    """
    # Intentionally a stub: settings currently persist only for the session.
    pass
def load_settings(self):
    """
    Load settings from disk to the local cache.
    """
    # Intentionally a stub: defaults are used until loading is implemented.
    pass
def install_pick_cam(self):
    """
    For every camera that we've successfully loaded, subscribe the pick_cam
    method if we haven't done so already.
    """
    try:
        installed = self.installed
    except AttributeError:
        # First call: lazily create the bookkeeping set of subscribed imagers.
        installed = set()
        self.installed = installed
    for system in self.loader.cache.values():
        imager = system['imager']
        if imager not in installed:
            # run=False: do not fire the callback immediately on subscribe.
            imager.subscribe(self.pick_cam, event_type=imager.SUB_STATE,
                             run=False)
            installed.add(imager)
def pick_cam(self, *args, **kwargs):
    """
    Callback to switch the active imager as the procedures progress.
    """
    if self.auto_switch_cam:
        with self.cam_lock:
            chosen_imager = None
            for img in self.imagers():
                pos = img.position
                if pos == "Unknown":
                    # Transient state (imager moving): wait for the next
                    # state update instead of switching now.
                    return
                elif pos == "IN":
                    # First inserted imager wins.
                    chosen_imager = img
                    break
            combo = self.ui.image_title_combo
            if chosen_imager is not None:
                name = chosen_imager.name
                if name != combo.currentText():
                    logger.info('Automatically switching cam to %s', name)
                    index = self.all_imager_names.index(name)
                    combo.setCurrentIndex(index)
def read_config(self):
    """
    Read the nominal-position config file.

    :return: the parsed JSON dict, or None when no config file is chosen or
             the file cannot be read or parsed.
    """
    if self.nominal_config is None:
        return None
    try:
        with open(self.nominal_config, 'r') as f:
            return json.load(f)
    except (OSError, ValueError):
        # Missing/unreadable file or malformed JSON counts as "no config".
        # (Bug fix: the previous bare except also swallowed
        # KeyboardInterrupt/SystemExit and masked unrelated errors.)
        return None
def save_config(self, d):
    """Write dict *d* to the nominal config file, if one is chosen."""
    if self.nominal_config is None:
        return
    with open(self.nominal_config, 'w') as f:
        json.dump(d, f)
def cache_config(self):
    """Merge the on-disk config (when readable) into the in-memory cache."""
    loaded = self.read_config()
    if loaded is not None:
        self.config_cache.update(loaded)
def save_goal(self, goal_group):
    """Persist a single goal group's value under its text label."""
    value = goal_group.value
    if value is None:
        logger.info('No value to save for this goal.')
        return
    config = self.read_config() or {}
    config[goal_group.text()] = value
    self.save_config(config)
def save_active_goals(self):
    """Persist every entered goal that belongs to the active procedure."""
    updates = {}
    active_count = len(self.active_system())
    for i, goal_group in enumerate(self.goals_groups):
        # Only the first active_count goal boxes belong to the procedure.
        if i >= active_count:
            break
        value = goal_group.value
        if value is not None:
            updates[goal_group.text()] = value
    config = self.read_config() or {}
    config.update(updates)
    self.save_config(config)
def save_mirror(self, mirror_group):
    """Persist one mirror's current position under the mirror's name."""
    config = self.read_config() or {}
    device = mirror_group.obj
    config[device.name] = device.position
    self.save_config(config)
def save_active_mirrors(self):
    """
    Persist a time-averaged position for every active mirror.

    Each mirror's position readback is sampled ``averages`` times so a
    noisy signal is smoothed before being written to the config file.
    """
    averages = 1000
    active = self.mirrors()
    saves = {mirror.name: 0 for mirror in active}
    for _ in range(averages):
        for mirror in active:
            saves[mirror.name] += mirror.position / averages
    logger.info('Saving positions: %s', saves)
    config = self.read_config() or {}
    config.update(saves)
    self.save_config(config)
def active_system(self):
    """
    Return the flat list of system keys used by the active procedure.
    """
    if self.procedure == 'None':
        return []
    return [key
            for step in self.alignments[self.procedure]
            for key in step]
def load_active_system(self):
    """Ensure every subsystem of the active procedure has been loaded."""
    for key in self.active_system():
        self.loader.get_subsystem(key)
def _objs(self, key):
objs = []
for act in self.active_system():
subsystem = self.loader[act]
if subsystem is None:
objs.append(None)
else:
objs.append(subsystem[key])
return objs
def mirrors(self):
    """Active mirror objects, one per system (None where unloaded)."""
    return self._objs('mirror')
def imagers(self):
    """Active imager objects, one per system (None where unloaded)."""
    return self._objs('imager')
def slits(self):
    """Active slits objects, one per system (None where unloaded)."""
    return self._objs('slits')
def goals(self):
    """Current goal values from the entry boxes (None when empty/invalid)."""
    return [group.value for group in self.goals_groups]
def goal(self):
    """
    Goal associated with the visible imager, or None when that imager is
    not part of the active procedure.
    """
    index = self.procedure_index()
    return None if index is None else self.goals()[index]
def procedure_index(self):
    """
    Goal index of the visible imager in the padded imager list, or None
    when it is not part of the active procedure.
    """
    padded = self.imagers_padded()
    if self.image_obj in padded:
        return padded.index(self.image_obj)
    return None
def none_pad(self, obj_list):
    """
    Return *obj_list* extended with None entries so its length is at
    least MAX_MIRRORS (already-long lists are returned unchanged).
    """
    padded = list(obj_list)
    padded += [None] * (MAX_MIRRORS - len(padded))
    return padded
def mirrors_padded(self):
    """Active mirrors, None-padded to MAX_MIRRORS entries."""
    return self.none_pad(self.mirrors())
def imagers_padded(self):
    """Active imagers, None-padded to MAX_MIRRORS entries."""
    return self.none_pad(self.imagers())
def slits_padded(self):
    """Active slits, None-padded to MAX_MIRRORS entries."""
    return self.none_pad(self.slits())
def get_widget_set(self, name, num=MAX_MIRRORS):
"""
Widgets that come in sets of count MAX_MIRRORS are named carefully so
we can use this macro to grab related widgets.
Parameters
----------
name: str
Base name of widget set e.g. 'name'
num: int, optional
Number of widgets to return
Returns
-------
widget_set: list
List of widgets e.g. 'name_1', 'name_2', 'name_3'...
"""
widgets = []
for | |
<gh_stars>1-10
# --------------------------------------------------------
# Written by <NAME> (https://github.com/JudyYe)
# --------------------------------------------------------
from __future__ import print_function
import os
import os.path as osp
from typing import Tuple
import numpy as np
import torch
import torch.nn.functional as F
import pytorch3d.ops as ops_3d
from pytorch3d.renderer import MeshRasterizer, SfMPerspectiveCameras, TexturesVertex, MeshRenderer, SoftGouraudShader, \
DirectionalLights, RasterizationSettings, get_world_to_view_transform
from pytorch3d.renderer.mesh.rasterizer import Fragments
from pytorch3d.renderer.mesh.utils import _interpolate_zbuf, _clip_barycentric_coordinates
from pytorch3d.structures import Meshes
import pytorch3d.structures.utils as struct_utils
from pytorch3d.transforms import Transform3d, Rotate
from nnutils import geom_utils
def cubify(vox_world, th=0.1, detach_vox=True) -> Meshes:
    """
    Convert a voxel occupancy grid into a mesh spanning [-0.5, 0.5].

    :param vox_world: (N, C, D, H, W) tensor, or an ndarray that is
        promoted to a float CUDA tensor
    :param th: occupancy threshold for cubify
    :param detach_vox: detach the voxels from the autograd graph first
    :return: pytorch3d Meshes
    """
    if not torch.is_tensor(vox_world):
        batch = vox_world.shape[0]
        side = vox_world.shape[-1]
        vox_world = torch.FloatTensor(vox_world).view(batch, 1, side, side, side).cuda()
    if detach_vox:
        vox_world = vox_world.detach()
    meshes = ops_3d.cubify(vox_world.squeeze(1), th, align='corner')
    # cubify yields a [-1, 1] cube; shrink to the [-0.5, 0.5] convention.
    return meshes.scale_verts_(0.5)
def param_to_7dof_batcch(param, f=375, use_scale=False, use_rho=False):
    """
    Expand a (N, 6) az/el/scale/translation parameterization to 7 dof by
    appending the focal length, optionally overriding scale and depth.

    :param param: (N, 6) tensor [az, el, scale, tx, ty, tz]
    :param f: scalar focal length
    :param use_scale: keep the provided scale instead of forcing 1
    :param use_rho: keep the provided translation instead of the canonical
        depth derived from f
    :return: (N, 7) tensor [az, el, scale, tx, ty, tz, f]
    """
    azel, scale, trans = torch.split(param, [2, 1, 3], dim=1)
    zeros = torch.zeros_like(scale)
    if not use_scale:
        scale = zeros + 1
    if not use_rho:
        # Canonical distance keeps apparent size constant across focals.
        trans = torch.cat([zeros, zeros, zeros + calc_rho(f)], dim=1)
    focal = zeros + f
    return torch.cat([azel, scale, trans, focal], dim=1)
def calc_rho(f):
    """Camera distance for focal length f, holding rho/f at the reference
    ratio 2 / 1.875 so the rendered object size stays constant."""
    base_f, base_rho = 1.875, 2
    return base_rho * f / base_f
def view_vox2mesh_py3d(view):
    """
    Convert 7-dof view parameters into pytorch3d camera arguments.

    :param view: (N, 7) tensor [az, el, scale, tx, ty, tz, f]
    :return: focal (N,), rotation (N, 3, 3), translation (N, 3)
    """
    view = view.clone()
    view, focal = torch.split(view, [6, 1], dim=1)
    # Flip azimuth for the pytorch3d camera convention.
    view[:, 0] = -view[:, 0]
    focal = (focal * 2).squeeze(1)
    scale, trans, rot = geom_utils.azel2uni(view, homo=False)
    return focal, rot, trans
def param7dof_to_camera(view_params) -> SfMPerspectiveCameras:
    """
    Build SfM perspective cameras from (N, 7) view parameters.

    :param view_params: (N, 7) tensor [az, el, scale, tx, ty, tz, f]
    :return: SfMPerspectiveCameras on the same device as the input
    """
    focal, rot, trans = view_vox2mesh_py3d(view_params)
    return SfMPerspectiveCameras(focal_length=focal, R=rot, T=trans,
                                 device=view_params.device)
def render_meshify_voxel(voxels, out_size, view_param, th=0.05):
    """
    Cubify a voxel grid and render it, falling back to a black image when
    rendering fails (e.g. nothing is above the cubify threshold).

    :param voxels: (N, C, D, H, W) occupancies
    :param out_size: square output resolution
    :param view_param: (N, 7) camera parameters
    :param th: cubify threshold
    :return: dict with at least an 'image' entry of shape (N, 3, H, W)
    """
    meshes = cubify(voxels, th)
    try:
        recon = render_mesh(meshes, out_size, view_param)
    # Narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        print('No mesh')
        N = voxels.size(0)
        recon = {'image': torch.zeros(N, 3, out_size, out_size)}
    return recon
def render_mesh(meshes: Meshes, out_size, view_param, texture=None, **kwargs):
    """
    Render meshes with a soft Gouraud shader under the 7-dof camera.

    :param meshes: pytorch3d Meshes in world coordinates
    :param out_size: square output resolution
    :param view_param: (N, 7) camera parameters [az, el, scale, t, f]
    :param texture: optional per-vertex colors, used only when the meshes
        carry no textures; defaults to constant white
    :return: dict with 'image' (N, 3, H, W) and 'mask' (N, 1, H, W) in
        [0, 1], plus 'rgba' (N, 4, H, W) rescaled to [-1, 1]
    """
    N, V, _ = meshes.verts_padded().size()
    if meshes.textures is None:
        if texture is None:
            # Default to a constant white vertex color.
            texture = torch.zeros([N, V, 3]).to(view_param) + 1
        meshes.textures = pad_texture(meshes, texture)
    cameras = param7dof_to_camera(view_param)
    raster_settings = kwargs.get('raster_settings', RasterizationSettings(image_size=out_size))
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shader = SoftGouraudShader(device=meshes.device, lights=ambient_light(meshes.device, view_param))
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=shader
    )
    # Default clipping planes bracket the object depth (t_z +/- 1).
    if 'zfar' not in kwargs:
        kwargs['zfar'] = view_param[:, -2].view(N, 1, 1, 1) + 1
    if 'znear' not in kwargs:
        kwargs['znear'] = view_param[:, -2].view(N, 1, 1, 1) - 1
    image = renderer(meshes, cameras=cameras, **kwargs)
    # Flip vertically, then HWC -> CHW.
    image = torch.flip(image, dims=[-3])
    image = image.transpose(-1, -2).transpose(-2, -3)  # H, 4, W --> 4, H, W
    rgb, mask = torch.split(image, [image.size(1) - 1, 1], dim=1)  # [0-1]
    # NOTE(review): only 'rgba' is rescaled to [-1, 1]; 'image'/'mask' are
    # split off before the rescale and remain in [0, 1] — confirm this
    # asymmetry is what callers expect.
    image = image * 2 - 1
    return {'image': rgb, 'mask': mask, 'rgba': image}
def render_normals(meshes: Meshes, out_size, view_param, **kwargs):
    """
    Render per-pixel surface normals in camera space.

    :param meshes: Meshes in world coordinates (textures are replaced on a
        clone; the input is not modified)
    :param out_size: square output resolution
    :param view_param: (N, 7) camera parameters
    :return: dict with 'normal' (N, 3, H, W) unit normals with background
        zeroed, 'mask' (N, 1, H, W), and the raw 'rgba' image
    """
    N, V, _ = meshes.verts_padded().size()
    # Clone the mesh and replace its texture with camera-space normals.
    meshes = meshes.clone()
    world_normals = meshes.verts_normals_padded()
    cameras = param7dof_to_camera(view_param)  # real camera
    trans_world_to_view = cameras.get_world_to_view_transform()
    view_normals = trans_world_to_view.transform_normals(world_normals)
    # Place the view-space normals as per-vertex textures.
    meshes.textures = pad_texture(meshes, view_normals)
    raster_settings = kwargs.get('raster_settings', RasterizationSettings(image_size=out_size))
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    # color=(1, 0, 0) -> ambient=1, diffuse=specular=0, so the normal
    # texture passes through the shader unshaded.
    shader = SoftGouraudShader(device=meshes.device, lights=ambient_light(meshes.device, view_param, color=(1, 0, 0)))
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=shader
    )
    # Default clipping planes bracket the object depth (t_z +/- 1).
    if 'zfar' not in kwargs:
        kwargs['zfar'] = view_param[:, -2].view(N, 1, 1, 1) + 1
    if 'znear' not in kwargs:
        kwargs['znear'] = view_param[:, -2].view(N, 1, 1, 1) - 1
    image = renderer(meshes, cameras=cameras, **kwargs)
    # Flip vertically, then HWC -> CHW.
    image = torch.flip(image, dims=[-3])
    image = image.transpose(-1, -2).transpose(-2, -3)  # H, 4, W --> 4, H, W
    rgb, mask = torch.split(image, [image.size(1) - 1, 1], dim=1)  # [0-1]
    # Align with this project's normal convention: flip x and z channels.
    rgb[:, 0] *= -1
    rgb[:, 2] *= -1
    # Mask out the background.
    rgb = rgb * mask
    # Re-normalize to unit vectors (interpolation breaks unit length).
    rgb = F.normalize(rgb, dim=1)  # N, 3, H, W
    return {'normal': rgb, 'mask': mask, 'rgba': image}
def pad_texture(meshes: Meshes, feature: torch.Tensor) -> TexturesVertex:
    """
    Wrap per-vertex features into a TexturesVertex aligned with *meshes*.

    :param meshes: the meshes the texture will be attached to
    :param feature: packed (sumV, C) tensor, a list of per-mesh tensors,
        or an existing TexturesVertex (returned unchanged)
    :return: TexturesVertex with bookkeeping copied from *meshes*
    """
    if isinstance(feature, TexturesVertex):
        return feature
    if feature.dim() == 2:
        # Packed -> list of (V_i, C) so TexturesVertex can pad it itself.
        feature = struct_utils.packed_to_list(feature, meshes.num_verts_per_mesh().tolist())
    texture = TexturesVertex(feature)
    # NOTE(review): the underscore fields below are pytorch3d internals;
    # this mirrors what vertex-texture sampling expects but may break
    # across pytorch3d versions — confirm against the installed release.
    texture._num_faces_per_mesh = meshes.num_faces_per_mesh().tolist()
    texture._num_verts_per_mesh = meshes.num_verts_per_mesh().tolist()
    texture._N = meshes._N
    texture.valid = meshes.valid
    return texture
def ambient_light(device='cpu', param_view=None, **kwargs):
    """
    Build directional lights for rendering.

    :param device: torch device for the light tensors
    :param param_view: optional (N, 7) view parameters; when given, the
        light direction opposes the camera azimuth, otherwise a fixed
        direction is used
    :param color: optional (ambient, diffuse, specular) intensities via
        kwargs; defaults to (0.65, 0.3, 0.0)
    :return: pytorch3d DirectionalLights
    """
    if param_view is not None:
        # Bug fix: the original condition was inverted and called
        # get_light_direction(None), which cannot work.
        d = get_light_direction(param_view)
    else:
        d = ((0, -0.6, 0.8), )
    color = kwargs.get('color', np.array([0.65, 0.3, 0.0]))
    am, df, sp = color
    # Bug fix: trailing commas previously turned each assignment into a
    # 1-tuple, producing (((r, g, b),),) instead of ((r, g, b),).
    ambient_color = ((am, am, am), )
    diffuse_color = ((df, df, df), )
    specular_color = ((sp, sp, sp), )
    return DirectionalLights(
        device=device,
        ambient_color=ambient_color,
        diffuse_color=diffuse_color,
        specular_color=specular_color,
        direction=d,
    )
def get_light_direction(view_params):
    """Light direction sharing the camera's elevation but with the
    opposite azimuth."""
    N = view_params.size(0)
    az, el, _ = torch.split(view_params, [1, 1, view_params.size(-1) - 2], dim=-1)
    rot = geom_utils.azel2rot(-az, el, False)  # (N, 3, 3)
    z_axis = torch.zeros([N, 3, 1]).to(az)
    z_axis[:, 2] += 1  # unit vector along +z
    direction = torch.matmul(rot, z_axis).squeeze(-1)
    return -direction
def get_soft_rasterizer_setting(**kwargs):
    """Rasterization settings for soft (differentiable) rendering, using
    the standard SoftRas blur radius log(1/eps - 1) * sigma."""
    sigma = kwargs.get('sigma', 1e-4)
    blur = np.log(1. / 1e-4 - 1.) * sigma
    return RasterizationSettings(
        image_size=kwargs.get('image_size', 224),
        blur_radius=blur,
        faces_per_pixel=kwargs.get('faces_per_pixel', 10),
        perspective_correct=False,
    )
def get_local_feat(meshes: Meshes, rasterizer: MeshRasterizer, local_feat, view, sym=1, vis=True
                   ) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Sample image-space features onto mesh vertices, optionally
    symmetrizing them across the mesh's mirror plane.

    :param meshes: Meshes in world coordinates
    :param rasterizer: rasterizer used to determine vertex visibility
    :param local_feat: image feature map(s) to sample from
    :param view: (N, 7) camera parameters
    :param sym: symmetrization mode forwarded to symmetrify_verts_feat;
        0 disables symmetrization
    :param vis: restrict sampling to visible vertices
    :return: (packed per-vertex features, per-vertex visibility)
    """
    _, screen_meshes, img_vis = transform_to_screen_vis(meshes, rasterizer, view, True)
    verts_feat = sample_local_feat(local_feat, screen_meshes, img_vis, vis=vis)
    if sym > 0:
        # Mirror-partner lookup along x, then blend features/visibility.
        sym_index = find_sym_index(meshes, dim=0)
        verts_feat = symmetrify_verts_feat(verts_feat, img_vis, sym_index, mode=sym)
        img_vis = symmetrify_verts_feat(img_vis, img_vis, sym_index, mode=1)
    return verts_feat, img_vis
def transform_to_screen_vis(meshes: Meshes, rasterizer: MeshRasterizer, view, vis=True):
    """Rasterize and return (fragments, screen-space meshes, per-vertex
    visibility)."""
    fragments, screen_meshes = rasterize_wrapper(rasterizer, meshes, view)
    visibility = get_vis_verts(meshes, fragments, visible=vis)
    return fragments, screen_meshes, visibility
def get_vis_verts(world_meshes: Meshes, fragment: Fragments, visible=True,
                  ) -> torch.Tensor:
    """
    Per-vertex visibility mask derived from rasterization fragments.

    :param world_meshes: the rasterized meshes
    :param fragment: pytorch3d Fragments for those meshes
    :param visible: when False, every vertex is marked visible
    :return: (sumV, 1) tensor of 0/1 visibility over packed vertices
    """
    device = world_meshes.device
    if not visible:
        return torch.ones([world_meshes.verts_packed().size(0), 1]).to(device)
    # Closest face per pixel; -1 marks background pixels.
    face_inds = fragment.pix_to_face[..., 0]  # (N, H, W) packed face ids
    valid_inds = face_inds > -1
    face_valid_inds = torch.masked_select(face_inds, valid_inds)  # nFvalid
    # (Removed a no-op faces_padded() call whose result was discarded.)
    faces = world_meshes.faces_packed()  # (nF, 3)
    # Every vertex of every visible face counts as visible.
    vis_verts_inds = faces[face_valid_inds, :].view(-1)  # (nFvalid * 3)
    visible_verts = torch.zeros([world_meshes.verts_packed().size(0)]).to(vis_verts_inds)
    visible_verts.scatter_(0, vis_verts_inds, 1.)
    return visible_verts.unsqueeze(-1)
def symmetrify_verts_feat(verts_feat, wgt, sym_index, dim=0, mode=1, eps=1e-6) -> torch.Tensor:
    """
    Blend per-vertex features with those of each vertex's mirror partner.

    :param verts_feat: packed feature (sumV, C)
    :param wgt: packed weights (sumV, 1), e.g. visibility
    :param sym_index: a Meshes (partners are computed via find_sym_index)
        or a precomputed (sumV, 1) partner-index tensor
    :param dim: mirror axis, used only when sym_index is a Meshes
    :param mode: 1 = weighted average of each vertex and its mirror;
        2 = copy from the mirror only where the vertex itself has zero
        weight but the mirror does not
    :param eps: guards against division by zero in mode 1
    :return: blended packed features (sumV, C)
    """
    if isinstance(sym_index, Meshes):
        sym_index = find_sym_index(sym_index, dim)
    # Gather each vertex's mirror-partner feature and weight.
    flip_feat = torch.gather(verts_feat, 0, sym_index.expand(verts_feat.size()))
    flip_w = torch.gather(wgt, 0, sym_index)
    if mode == 1:
        # Weight-averaged blend of a vertex and its mirror.
        verts_feat = (verts_feat * wgt + flip_feat * flip_w) / (flip_w + wgt + eps)
    elif mode == 2:
        # (1,1), (1,0), (0,0) -> keep self; (0,1) -> take the mirror.
        mask = (1 - wgt) * flip_w
        verts_feat = (1 - mask) * verts_feat + mask * flip_feat
    return verts_feat
def find_sym_index(world_meshes, dim=0):
    """
    For every vertex, find the packed index of its mirror partner across
    the plane ``x[dim] = 0`` via nearest-neighbour search against the
    reflected vertices.

    :param world_meshes: Meshes assumed (approximately) mirror-symmetric
        about the chosen axis
    :param dim: axis to reflect (0 -> x)
    :return: (sumV, 1) LongTensor of partner indices into packed verts
    """
    world_verts = world_meshes.verts_padded()
    flip_world_verts = world_verts.clone()
    flip_world_verts[..., dim] = -flip_world_verts[..., dim]
    l1 = world_meshes.num_verts_per_mesh()
    # The nearest reflected vertex (K=1) is taken as the mirror partner.
    _, p1_index, _ = ops_3d.knn_points(world_verts, flip_world_verts, l1, l1)
    # Convert per-mesh (padded) indices into packed indices.
    offset = world_meshes.mesh_to_verts_packed_first_idx()  # (N, )
    p1_index = p1_index.squeeze(-1) + offset.unsqueeze(-1)  # (N, maxV)
    p1_index = p1_index.view(-1, 1)
    packed_index = world_meshes.verts_padded_to_packed_idx().unsqueeze(-1)  # 1D. (sumV, 1)
    p1_packed_index = torch.gather(p1_index, 0, packed_index)  # (sumV, 1)
    return p1_packed_index
def rasterize_wrapper(rasterizer: MeshRasterizer, meshes: Meshes, param_view: torch.Tensor) -> (Fragments, Meshes):
    """
    Rasterize *meshes* under the 7-dof camera, applying the same z-buffer
    fix-up that pytorch3d's MeshRenderer.forward performs for blurred
    (soft) rasterization.

    :param rasterizer: configured MeshRasterizer
    :param meshes: Meshes in world coordinates
    :param param_view: (N, 7) camera parameters
    :return: (Fragments, Meshes transformed to screen space; z stays in
        camera space)
    """
    cameras = param7dof_to_camera(param_view)
    fragments = rasterizer(meshes, cameras=cameras)
    # Mirrors pytorch3d/renderer/mesh/renderer.py:forward — with a soft
    # rasterizer the barycentrics must be clipped and the z-buffer
    # re-interpolated from the clipped coordinates.
    raster_settings = rasterizer.raster_settings
    if raster_settings.blur_radius > 0.0:
        print('no')
        # TODO: potentially move barycentric clipping to the rasterizer
        # if no downstream functions requires unclipped values.
        # This will avoid unnecssary re-interpolation of the z buffer.
        meshes_screen = rasterizer.transform(meshes, cameras=cameras)
        clipped_bary_coords = _clip_barycentric_coordinates(
            fragments.bary_coords
        )
        clipped_zbuf = _interpolate_zbuf(
            fragments.pix_to_face, clipped_bary_coords, meshes_screen
        )
        fragments = Fragments(
            bary_coords=clipped_bary_coords,
            zbuf=clipped_zbuf,
            dists=fragments.dists,
            pix_to_face=fragments.pix_to_face,
        )
    # NOTE(review): transform_verts is not defined in this module's visible
    # imports — presumably a project helper defined elsewhere; confirm it
    # keeps z in camera space as the docstring claims.
    meshes_screen = transform_verts(meshes, cameras=cameras)
    return fragments, meshes_screen
def sample_local_feat(img_feats, screen_meshes: Meshes, visible_verts,
interp_mode: str = "bilinear",
padding_mode: str = "zeros",
align_corners: bool = False,
vis: bool = True,
bg_value=0.) -> Tuple[torch.Tensor, torch.Tensor]:
"""if vis: invisible verts are getting bg_value. """
verts_feat = ops_3d.vert_align(img_feats, screen_meshes,
return_packed=True, interp_mode=interp_mode,
padding_mode=padding_mode, | |
<gh_stars>1-10
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
import random
from scipy import sparse
from scipy.special import comb
from scipy.special import gammaln
from scipy.special import erfcx
from scipy.stats import norm
import scipy.stats
import seaborn
import csv
import pandas as pd
import pickle
from collections import defaultdict
import operator
from scipy.sparse import csr_matrix
import itertools
import os.path
import os
from joblib import Parallel, delayed
from common import parse_config, get_chrom_size
""" This script runs Large Average Submatrix algorithm on Hi-C contact matrices.
Output is contiguous submatrices with a large average for each chromosome pair.
Adjust the threshold for this algorithm in the configuration file run_params.json if necessary."""
def simulate_data():
    """Random 20x40 Gaussian matrix with an implanted high-mean 5x3 block
    (rows 2:7, cols 3:6) for testing the LAS search."""
    background = np.random.randn(20, 40)
    elevated = 2.5 * np.random.randn(5, 3) + 10
    background[2:7, 3:6] = elevated
    return background
def transform(data):
    """log(1 + x) variance-stabilizing transform of the contact counts."""
    return np.log(1 + data)
def rescale(data, mean, std):
    """Convert *data* to z-scores given a mean and standard deviation."""
    return (data - mean) / std
def check_submatrix_below_threshold(sub_matrix, threshold):
    """
    Decide whether the LAS search should continue.

    Despite the name, returns True while the size-adjusted average
    sqrt(rows * cols) * mean(sub_matrix) still EXCEEDS the threshold
    (i.e. the submatrix is still significant), False once it drops below.

    :param sub_matrix: the most recently extracted submatrix
    :param threshold: stopping threshold on the size-adjusted average
    :return: bool — True to keep searching
    """
    num_rows, num_cols = sub_matrix.shape
    avg_sub_matrix = np.sqrt(num_rows*num_cols)*np.average(sub_matrix)
    #threshold = scipy.stats.norm.ppf(percentile_threshold)
    # NOTE: Python 2 print statement — this module is Python 2 only.
    print avg_sub_matrix, threshold
    if (avg_sub_matrix > threshold):
        check = True
    else:
        check = False
    return check
def residual(u, data, rows, cols):
    """Subtract the mean of submatrix *u* from data[rows, cols] in place
    and return the (mutated) matrix."""
    block = np.ix_(rows, cols)
    data[block] = data[block] - np.mean(u)
    return data
def large_average_submatrix_adj(data, chr1, chr2, threshold_new):
    """
    Repeatedly extract the best contiguous submatrix from *data* until its
    size-adjusted average drops below the threshold. Each accepted
    submatrix is recorded and its mean subtracted (residualized) from the
    matrix before searching again.

    :param data: 2-D z-score matrix for the chromosome pair
    :param chr1, chr2: chromosome labels, used to name the output directory
    :param threshold_new: stopping threshold for the size-adjusted average
    :return: parallel lists of start/stop rows, start/stop cols, scores
        and averages for every accepted submatrix
    """
    # Per-pair directory used to store intermediate search iterations.
    dir = config["HIC_NEW_DIR"] + str(chr1) + '_' + str(chr2) + '/'
    if (os.path.isdir(dir) == False):
        os.makedirs(dir)
    # Run the algorithm until the score falls below the threshold.
    continue_search = True
    iter = 0
    # Accumulators for the accepted submatrices.
    start_rows, stop_rows, start_cols, stop_cols, best_score_list, avg_list = [], [], [], [], [], []
    while (continue_search):
        rows, cols, sub_matrix, best_score = search_main(data, dir, iter)
        data = residual(sub_matrix, data, rows, cols)
        # Keep only submatrices whose size-adjusted average beats the threshold.
        continue_search = check_submatrix_below_threshold(sub_matrix, threshold_new)
        if (continue_search == True):
            start_rows.append(rows[0])
            stop_rows.append(rows[-1])
            start_cols.append(cols[0])
            stop_cols.append(cols[-1])
            best_score_list.append(best_score)
            avg_list.append(np.average(sub_matrix))
        iter = iter + 1
        print 'Best score = ', best_score
        print 'Average = ', np.average(sub_matrix)
        print rows[0], rows[-1], cols[0], cols[-1]
    return start_rows, stop_rows, start_cols, stop_cols, best_score_list, avg_list
def search_main(data, dir, iter):
    """Run the randomized LAS search num_iter times, log every candidate
    to disk, and return the best submatrix found."""
    num_iter = 100
    # One row per trial: (start_row, k, start_col, l, score).
    search_attributes = np.empty((num_iter, 5))
    for trial in range(num_iter):
        search_attributes[trial] = search(data)
    # Persist the trials for later inspection.
    np.savetxt(dir + 'sub_matrix' + str(iter) + '.txt', search_attributes)
    best = search_attributes[np.argmax(search_attributes[:, 4])]
    best_start_row, best_k, best_start_col, best_l, best_score = best
    rows = np.arange(best_start_row, best_start_row + best_k, dtype='int')
    cols = np.arange(best_start_col, best_start_col + best_l, dtype='int')
    sub_matrix = data[np.ix_(rows, cols)]
    return rows, cols, sub_matrix, best_score
def score(u, data):
    """LAS significance score of submatrix *u* within *data*: the negative
    log of the expected number of equally heavy k-by-l submatrices."""
    m, n = data.shape
    k, l = u.shape
    tau = np.mean(u)
    expected = comb(m, k) * comb(n, l) * norm.cdf(-tau * np.sqrt(k * l))
    return -np.log(expected)
def score_sum(sum_u, k, l, data):
    """Numerically stable LAS score computed from the submatrix *sum*:
    binomials via gammaln, Gaussian tail via the scaled erfc."""
    m, n = data.shape
    log_comb_rows = gammaln(m + 1) - gammaln(k + 1) - gammaln(m - k + 1)
    log_comb_cols = gammaln(n + 1) - gammaln(l + 1) - gammaln(n - l + 1)
    avg = sum_u / np.sqrt(k * l)
    # log Phi(-avg) written via erfcx to avoid underflow for large avg.
    log_tail = -(avg * avg) / 2.0 + np.log(erfcx(avg / np.sqrt(2)) * 0.5)
    return -log_tail - log_comb_rows - log_comb_cols
def grouped_sum(array, N):
    """Sliding-window sums of length N: out[i] = sum(array[i:i+N])."""
    length = len(array) - N + 1
    windowed = np.zeros((length))
    # Sum N shifted views of the array.
    for offset in range(N):
        windowed = windowed + array[offset:length + offset]
    return windowed
def search(data):
    """
    One randomized LAS search: start from random window sizes (k, l),
    then alternate row-window and column-window optimization until the
    score converges.

    :param data: 2-D z-score matrix
    :return: (start_row, k, start_col, l, score) of the converged submatrix
    """
    # Window sizes are capped at 10 Mb worth of bins.
    max_num_rows = int(10000000.0/config["HIC_RESOLN"])
    max_num_cols = int(10000000.0/config["HIC_RESOLN"])
    k = random.randint(1, max_num_rows)
    l = random.randint(1, max_num_cols)
    row_set, col_set = search_fixed_k_l(data, k,l)
    # Now allow k and l to vary; iterate to convergence of the score.
    pre_score = -1000000
    curr_score = 0
    while(pre_score != curr_score):
        # Sum the current column strip across columns -> per-row sums.
        row_summed = np.sum(col_set, axis =1)
        start_row, k, score_rows = enumerate_adj_submatrix_scores(data, row_summed, max_num_rows, k, l, 'row')
        # Select the best contiguous row strip.
        row_set = data[start_row:start_row+k, :]
        # Sum the row strip across rows -> per-column sums.
        col_summed = np.sum(row_set, axis =0)
        start_col, l, score_cols = enumerate_adj_submatrix_scores(data, col_summed, max_num_cols, k, l, 'col')
        # Select the best contiguous column strip.
        col_set = data[:,start_col:start_col+l]
        # update scores
        pre_score = curr_score
        curr_score = score_cols
    return start_row, k, start_col, l, curr_score
def enumerate_adj_submatrix_scores(data, row_summed, max_num_rows, k, l, row_or_col):
    """
    Scan all contiguous row (or column) windows and pick the best start
    position and window size.

    The original row and column branches were duplicated and differed in
    a single line (which dimension varies in score_sum); they are unified
    here.

    :param data: the full matrix (only its shape enters the score)
    :param row_summed: 1-D array of per-row (or per-column) sums over the
        currently selected columns (rows)
    :param max_num_rows: largest window size to consider
    :param k: current number of rows in the submatrix
    :param l: current number of columns in the submatrix
    :param row_or_col: 'row' to vary the row window, 'col' for columns
    :return: (best start index, best window size, best score)
    """
    if row_or_col not in ('row', 'col'):
        raise ValueError("row_or_col must be 'row' or 'col'")
    best_scores = []
    best_starts = []
    # +1 makes the range inclusive of max_num_rows.
    possible_sizes = range(1, max_num_rows + 1)
    for size in possible_sizes:
        # All contiguous windows of this size, via adjacent sums.
        window_sums = grouped_sum(row_summed, size)
        # The only row/col difference: which score_sum dimension varies.
        if row_or_col == 'row':
            score_list = [score_sum(s, size, l, data) for s in window_sums]
        else:
            score_list = [score_sum(s, k, size, data) for s in window_sums]
        start_ind, start_score = max(enumerate(score_list), key=operator.itemgetter(1))
        best_starts.append(start_ind)
        best_scores.append(start_score)
    # Choose the best window size overall.
    ind, best_score = max(enumerate(best_scores), key=operator.itemgetter(1))
    return best_starts[ind], possible_sizes[ind], best_score
def search_fixed_k_l(data, k, l):
    """
    Alternating maximization with fixed window sizes: starting from a
    random l-column strip, repeatedly pick the contiguous k rows and then
    the contiguous l columns with the largest sum, until the submatrix
    average stops changing.

    :param data: 2-D z-score matrix
    :param k: number of adjacent rows
    :param l: number of adjacent columns
    :return: (row_set, col_set) — the k-row strip and l-column strip of
        the converged submatrix
    """
    # initialize (select l adjacent columns at random)
    num_rows = data.shape[0]
    num_cols = data.shape[1]
    # choose a random starting position for column
    start_col = random.randint(0, num_cols-l)
    col_set = data[:,start_col:start_col+l]
    # initialize the running average
    pre_avg = -1000000
    curr_avg = 0
    # iterate until convergence
    while(pre_avg != curr_avg):
        # get k rows with the largest sum over l columns (adjacent rows)
        # make another matrix that is the sum of k adjacent columnns
        row_summed_data = np.asarray([np.sum(col_set[i:i+k,:]) for i in range(0, col_set.shape[0]-k+1)])
        # choose starting row that gave the largest sum
        start_row = np.argmax(row_summed_data)
        row_set = data[start_row:start_row+k, :]
        # get l rows with the largest sum over k rows (adjacent columns)
        # make another matrix that is the sum of l adjacent rows
        col_summed_data = np.asarray([np.sum(row_set[:,j:j+l]) for j in range(0, row_set.shape[1]-l+1)])
        # choose starting row that gave the largest sum
        start_col = np.argmax(col_summed_data)
        col_set = data[:,start_col:start_col+l]
        # compute the new average of the submatrix
        sub_matrix = data[np.ix_(range(start_row, start_row+k), range(start_col, start_col+l))]
        # update averages
        pre_avg = curr_avg
        curr_avg = np.mean(sub_matrix)
    return row_set, col_set
def df_remove_zeros_rows_cols(df):
    """Drop rows, then columns, that are entirely zero."""
    nonzero_rows = (df.T != 0).any()
    df = df[nonzero_rows]
    nonzero_cols = (df != 0).any()
    df = df[df.columns[nonzero_cols]]
    return df
def get_hic_matrix(hic_filename, chr1, chr2):
    """
    Load a sparse interchromosomal Hi-C contact list and return it as a
    dense DataFrame whose index/column labels are genomic positions (bp).
    The matrix is rectangular because the two chromosomes differ in size.

    :param hic_filename: tab-separated file of (pos1, pos2, contacts)
    :param chr1: chromosome for the rows
    :param chr2: chromosome for the columns
    :return: dense pandas DataFrame with NaNs replaced by 0
    """
    data = np.loadtxt(hic_filename, delimiter = '\t')
    # Convert genomic coordinates to bin indices.
    row_ind = data[:,0]/config["HIC_RESOLN"]
    col_ind = data[:,1]/config["HIC_RESOLN"]
    contact_values = data[:,2]
    # Chromosome sizes in bins (+1 for the final partial bin).
    # NOTE(review): this relies on Python 2 integer division if
    # get_chrom_size returns an int — confirm before porting to Python 3.
    chr1_size = get_chrom_size(chr1, config["GENOME_DIR"])/config["HIC_RESOLN"] + 1
    chr2_size = get_chrom_size(chr2, config["GENOME_DIR"])/config["HIC_RESOLN"] + 1
    hic_matrix = csr_matrix((contact_values, (row_ind, col_ind)), shape = (chr1_size, chr2_size))
    hic_dense = np.asarray(hic_matrix.todense())
    # Label axes with genomic positions rather than bin numbers.
    row_labels = np.arange(chr1_size)*config["HIC_RESOLN"]
    col_labels = np.arange(chr2_size)*config["HIC_RESOLN"]
    df = pd.DataFrame(hic_dense, index = row_labels, columns = col_labels)
    # get rid of nans
    df = df.fillna(0)
    return df
def map_pos2rownum(df, row_pos):
    """Row number whose index label equals *row_pos* (exact match)."""
    matches = np.where(df.index.values == row_pos)[0]
    return matches[0]
def map_pos2colnum(df, row_pos):
    """Column number whose (string) label equals the integer position."""
    matches = np.where(df.columns.values == str(int(row_pos)))[0]
    return matches[0]
def map_rownum2pos(df, row_num):
    """Index label (genomic position) at integer row *row_num*."""
    return df.index.values[row_num]
def map_colnum2pos(df, col_num):
    """Column label (genomic position) at integer column *col_num*, as a
    float (column labels are stored as strings)."""
    return float(df.columns.values[col_num])
def map_num2pos(df, start_rows, stop_rows, start_cols, stop_cols):
    """Translate row/column numbers of the found submatrices into genomic
    positions using the DataFrame labels."""
    start_row_pos = [map_rownum2pos(df, r) for r in start_rows]
    stop_row_pos = [map_rownum2pos(df, r) for r in stop_rows]
    start_col_pos = [map_colnum2pos(df, c) for c in start_cols]
    stop_col_pos = [map_colnum2pos(df, c) for c in stop_cols]
    return start_row_pos, stop_row_pos, start_col_pos, stop_col_pos
def numclust_avg(pair):
    """
    Plot score and average versus cluster number for one chromosome pair,
    reading the pair's intermingling_regions CSV if it exists. Saves two
    PNGs into HIC_NEW_DIR.

    :param pair: (chr1, chr2) chromosome labels
    """
    chr1, chr2 = pair
    fname = config["HIC_NEW_DIR"] + 'intermingling_regions.chr' + str(chr1) + '_chr' + str(chr2) + '.csv'
    # Skip pairs for which no results were produced.
    if (os.path.isfile(fname) == True):
        df_intermingling = pd.read_csv(fname, index_col = 0)
        # NOTE: xrange — this module is Python 2 only.
        plt.figure()
        plt.plot(xrange(df_intermingling.shape[0]), df_intermingling['score'], 'o-')
        plt.xlabel('Cluster #')
        plt.ylabel('Score')
        plt.savefig(config["HIC_NEW_DIR"] + 'cluster_score.chr' + str(chr1) + '_chr' + str(chr2) + '.png')
        plt.close()
        plt.figure()
        plt.plot(xrange(df_intermingling.shape[0]), df_intermingling['avg'], 'o-')
        plt.xlabel('Cluster #')
        plt.ylabel('Average')
        plt.savefig(config["HIC_NEW_DIR"] + 'cluster_average.chr' + str(chr1) + '_chr' + str(chr2) + '.png')
        plt.close()
def determine_min_max_hic():
    """
    Global minimum and maximum of the transformed Hi-C z-score matrices
    across all chromosome pairs that produced intermingling results, for
    a common color scale when plotting.

    :return: (minl, maxl) floats
    """
    chr_pairs = list(itertools.combinations(config["chrs"], 2))
    min_list = []
    max_list = []
    for pair in chr_pairs:
        chr1, chr2 = pair
        fname = config["HIC_NEW_DIR"] + 'intermingling_regions.chr' + str(chr1) + '_chr' + str(chr2) + '.csv'
        if os.path.isfile(fname):
            # Read the corresponding z-score matrix.
            hic_filename = config["HIC_FILT_DIR"] + 'chr' + str(chr1) + '_chr' + str(chr2) + '.zscore.txt'
            df = pd.read_csv(hic_filename, index_col=0)
            # Bug fix: DataFrame.as_matrix() was removed in pandas 1.0;
            # .values is equivalent and exists in all pandas versions.
            data = df.values
            min_list.append(np.min(data))
            max_list.append(np.max(data))
    # NOTE(review): raises ValueError when no pair produced a results
    # file — unchanged from the original behavior.
    minl = min(min_list)
    maxl = max(max_list)
    return minl, maxl
def draw_identified_LASregions(pair, minl, maxl):
chr1, chr2 = pair
plt.rc('font', family='serif')
# no gridlines
seaborn.set_style("dark", {'axes.grid':False})
numclust = 50
fname = config["HIC_NEW_DIR"] + 'intermingling_regions.chr' + str(chr1) + '_chr' + str(chr2) + '.csv'
# check if the file exists
if (os.path.isfile(fname) == True):
hic_filename = config["HIC_FILT_DIR"] +'chr' + str(chr1) + '_chr' + str(chr2) + '.zscore.txt'
df = pd.read_csv(hic_filename, index_col = 0)
data = df.as_matrix()
#plt.figure(figsize = (100, 100))
plt.figure()
plt.imshow(data, cmap = 'Reds', vmin = minl, vmax = maxl)
cbar = plt.colorbar()
#cbar.set_label('log(1+x) transformed rescaled HiC observed contacts')
cbar.set_label('Transformed Hi-C contacts', fontsize = 12)
cbar.solids.set_rasterized(True)
# label ticks with genomic position (Mb)
xaxis = range(0, df.shape[1], 100)
xlabels = [str(map_colnum2pos(df, x)/1000000.0) for x in xaxis]
plt.xticks(xaxis, xlabels)
yaxis = range(0, df.shape[0], 100)
ylabels = [str(map_rownum2pos(df, y)/1000000.0) for y in yaxis]
plt.yticks(yaxis, ylabels)
plt.xlabel('chr' + str(chr2) + ' (Mb)', fontsize = 14)
plt.ylabel('chr' + str(chr1) + ' (Mb)', fontsize = 14)
#plt.savefig(config["HIC_NEW_DIR"] + 'hic_transformed_rescaled.chr' + str(chr1) + '_chr' + str(chr2) + '.png')
plt.savefig(config["HIC_NEW_DIR"] + 'hic_transformed_rescaled.chr' + str(chr1) + '_chr' + str(chr2) + 'commonscale.png')
df_intermingling = pd.read_csv(fname, index_col = 0)
# iterate over all las regions found
for num in range(0, len(df_intermingling)):
region = df_intermingling.iloc[num]
start_row = map_pos2rownum(df, region['start | |
considered equal
PBC: a list of periodic axes (1,2,3)->(x,y,z)
Returns:
a single index corresponding to the detected Wyckoff position. If no
valid Wyckoff position is found, returns False
"""
index = check_wyckoff_position(points, wyckoffs, w_symm_all, exact_translation=exact_translation, PBC=PBC)
if index is not False:
generators = wyckoffs[index]
point = find_generating_point(points, generators, PBC=PBC)
if point is not None:
j, k = jk_from_i(index, orientations)
if orientations[j][k] != []:
return index
else:
return False
else:
print("(Inside check_wyckoff_position_molecular)")
print("Error: Could not generate points from Wyckoff generators")
print("wp_index: "+str(index))
print("Coordinates:")
for c in points:
print(c)
print("Generators:")
for g in generators:
print(g.as_xyz_string())
return False
def merge_coordinate_molecular(coor, lattice, wyckoffs, w_symm_all, tol, orientations, PBC=[1,2,3]):
    """
    Given a list of fractional coordinates, merges them within a given
    tolerance, and checks if the merged coordinates satisfy a Wyckoff
    position. Used for merging general Wyckoff positions into special
    Wyckoff positions within the random_crystal (and its derivative)
    classes.

    Args:
        coor: a list of fractional coordinates
        lattice: a 3x3 matrix representing the unit cell
        wyckoffs: an unorganized list of Wyckoff positions to check
        w_symm_all: a list of Wyckoff site symmetry obtained from
            get_wyckoff_symmetry
        tol: the cutoff distance for merging coordinates
        orientations: a list of valid molecular orientations within the
            space group
        PBC: a list of periodic axes (1,2,3)->(x,y,z)

    Returns:
        coor, index: (coor) is the new list of fractional coordinates after
        merging, and index is a single index of the Wyckoff position within
        the spacegroup. If merging is unsuccessful, or no index is found,
        returns the original coordinates and False
    """
    while True:
        # Find all pairs of points closer than tol under the cell + PBC.
        pairs, graph = find_short_dist(coor, lattice, tol, PBC=PBC)
        index = None
        valid = True
        if len(pairs)>0 and valid is True:
            if len(coor) > len(wyckoffs[-1]):
                # Collapse each connected cluster of close points onto its
                # geometric center.
                merged = []
                groups = connected_components(graph)
                for group in groups:
                    merged.append(get_center(coor[group], lattice, PBC=PBC))
                merged = np.array(merged)
                index = check_wyckoff_position_molecular(merged, orientations, wyckoffs, w_symm_all, exact_translation=False, PBC=PBC)
                if index is False:
                    return coor, False
                elif index is None:
                    # NOTE(review): valid is reset to True at the top of
                    # the next pass while coor is unchanged, so this branch
                    # appears able to loop forever — confirm intent.
                    valid = False
                else:
                    # Accept the merged coordinates and try merging again.
                    coor = merged
            else:#no way to merge
                return coor, False
        else:
            # Nothing left to merge: classify the final coordinates.
            if index is None:
                index = check_wyckoff_position_molecular(coor, orientations, wyckoffs, w_symm_all, exact_translation=False, PBC=PBC)
            return coor, index
def choose_wyckoff_molecular(wyckoffs, number, orientations):
    """
    Randomly pick a Wyckoff position able to host the remaining molecules.

    Rules:
    1) the position's multiplicity must not exceed *number*;
    2) half the time, larger multiplicities are preferred (scanned first);
    3) the site must admit at least one valid molecular orientation.

    Args:
        wyckoffs: an unsorted list of Wyckoff positions
        number: the number of molecules still needed in the unit cell
        orientations: valid orientations for the molecule, from
            get_sg_orientations (called within molecular_crystal)

    Returns:
        a [j, k] index pair for the Wyckoff position, or False when no
        position fits
    """
    prefer_high = np.random.random() > 0.5
    if prefer_high:
        # Scan from high multiplicity down; stop at the first usable level.
        for j, wyckoff in enumerate(wyckoffs):
            if len(wyckoff[0]) > number:
                continue
            usable = [[j, k] for k, w in enumerate(wyckoff)
                      if orientations[j][k] != []]
            if usable:
                return choose(usable)
        return False
    # Otherwise pool every admissible site and pick uniformly.
    usable = []
    for j, wyckoff in enumerate(wyckoffs):
        if len(wyckoff[0]) <= number:
            usable.extend([j, k] for k, w in enumerate(wyckoff)
                          if orientations[j][k] != [])
    if usable:
        return choose(usable)
    return False
class mol_site():
    """
    Class for storing molecular Wyckoff positions and orientations within
    the molecular_crystal class. Each mol_site object represents an
    entire Wyckoff position, not necessarily a single molecule.
    """
    def __init__(self, mol, position, wp, wp_generators, lattice, PBC=None):
        self.mol = mol
        """A Pymatgen molecule object"""
        self.position = position
        """Relative coordinates of the molecule's center within the unit cell"""
        self.wp = wp
        """The Wyckoff position for the site"""
        # Bug fix: this previously re-assigned self.wp, clobbering the
        # Wyckoff position and never storing the generators.
        self.wp_generators = wp_generators
        """The Wyckoff generators for the site"""
        self.multiplicity = len(wp)
        """The multiplicity of the molecule's Wyckoff position"""
        # Default handled here instead of a mutable default argument so
        # instances never share one list.
        self.PBC = [1, 2, 3] if PBC is None else PBC
        """The periodic boundary condition direction"""
        # NOTE(review): *lattice* is accepted but not stored — confirm
        # whether callers rely on it being discarded.
class molecular_crystal():
"""
Class for storing and generating molecular crystals based on symmetry
constraints. Based on the crystal.random_crystal class for atomic crystals.
Given a spacegroup, list of molecule objects, molecular stoichiometry, and
a volume factor, generates a molecular crystal consistent with the given
constraints. This crystal is stored as a pymatgen struct via self.struct
Args:
sg: The international spacegroup number
molecules: a list of pymatgen.core.structure.Molecule objects for
each type of molecule
numMols: A list of the number of each type of molecule within the
        primitive cell (NOT the conventional cell)
volume_factor: A volume factor used to generate a larger or smaller
unit cell. Increasing this gives extra space between molecules
allow_inversion: Whether or not to allow chiral molecules to be
inverted. If True, the final crystal may contain mirror images of
the original molecule. Unless the chemical properties of the mirror
image are known, it is highly recommended to keep this value False
orientations: Once a crystal with the same spacegroup and molecular
stoichiometry has been generated, you may pass its
valid_orientations attribute here to avoid repeating the
calculation, but this is not required
check_atomic_distances: If True, checks the inter-atomic distances
after each Wyckoff position is added. This requires slightly more
time, but vastly improves accuracy. For approximately spherical
molecules, or for large inter-molecular distances, this may be
turned off
"""
def __init__(self, sg, molecules, numMols, volume_factor, allow_inversion=False, orientations=None, check_atomic_distances=True):
#Necessary input
self.Msgs()
"""A list of warning messages to use during generation."""
self.PBC = [1,2,3]
numMols = np.array(numMols) #must convert it to np.array
self.factor = volume_factor
"""The supplied volume factor for the unit cell."""
self.numMols0 = numMols
self.sg = sg
"""The international spacegroup number of the crystal."""
#Reorient the molecules along their principle axes
oriented_molecules = []
#Allow support for generating molecules from text via openbable
for i, mol in enumerate(molecules):
if type(mol) == str:
#Read strings into molecules, try collection first,
#If string not in collection, use SMILES format
try:
mo = molecule_collection[mol]
except:
mo = ob_mol_from_string(mol)
mo = pmg_from_ob(mo)
molecules[i] = mo
for mol in molecules:
pga = PointGroupAnalyzer(mol)
mo = pga.symmetrize_molecule()['sym_mol']
oriented_molecules.append(mo)
self.molecules = oriented_molecules
"""A list of pymatgen.core.structure.Molecule objects, symmetrized and
oriented along their symmetry axes."""
self.boxes = []
"""A list of bounding boxes for each molecule. Used for estimating
volume of the unit cell."""
self.radii = []
"""A list of approximated radii for each molecule type. Used for
checking inter-molecular distances."""
for mol in self.molecules:
self.boxes.append(get_box(reoriented_molecule(mol)[0]))
max_r = 0
for site in mol:
radius = math.sqrt( site.x**2 + site.y**2 + site.z**2 )
if radius > max_r: max_r = radius
self.radii.append(max_r+1.0)
self.numMols = numMols * cellsize(self.sg)
"""The number of each type of molecule in the CONVENTIONAL cell"""
self.volume = estimate_volume_molecular(self.numMols, self.boxes, self.factor)
"""The volume of the generated unit cell"""
self.wyckoffs = get_wyckoffs(self.sg)
"""The Wyckoff positions for the crystal's spacegroup."""
self.wyckoffs_organized = get_wyckoffs(self.sg, organized=True)
"""The Wyckoff positions for the crystal's spacegroup. Sorted by
multiplicity."""
self.w_symm = get_wyckoff_symmetry(self.sg, molecular=True)
"""A list of site symmetry operations for the Wyckoff positions, obtained
from get_wyckoff_symmetry."""
self.wyckoff_generators = get_wyckoff_generators(self.sg)
"""A list of Wyckoff generators (molecular=False)"""
self.wyckoff_generators_m = get_wyckoff_generators(self.sg, molecular=True)
"""A list of Wyckoff generators (molecular=True)"""
self.check_atomic_distances = check_atomic_distances
"""Whether or not inter-atomic distances are checked at each step."""
self.allow_inversion = allow_inversion
"""Whether or not to allow chiral molecules to be inverted."""
#When generating multiple crystals of the same stoichiometry and sg,
#allow the user to re-use the allowed orientations, to reduce time cost
if orientations is None:
self.get_orientations()
else:
self.valid_orientations = orientations
"""The valid orientations for each molecule and Wyckoff position.
May be copied when generating a new molecular_crystal to save a
small amount of time"""
self.generate_crystal()
def Msgs(self):
self.Msg1 = 'Error: the stoichiometry is incompatible with the wyckoff sites choice'
self.Msg2 = 'Error: failed in the cycle of generating structures'
self.Msg3 = 'Warning: failed in the cycle of adding species'
self.Msg4 = 'Warning: failed in the cycle of choosing wyckoff sites'
self.Msg5 = 'Finishing: added the specie'
self.Msg6 = 'Finishing: added the whole structure'
def get_orientations(self):
"""
Calculates the valid orientations for each Molecule and Wyckoff
position. Returns a list with 4 indices:
index | |
# Source: tests/test_train.py (rasa)
import logging
import secrets
import sys
import tempfile
import os
from pathlib import Path
from typing import Text, Dict, Any
from unittest.mock import Mock
import pytest
from _pytest.capture import CaptureFixture
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from rasa.core.policies.ted_policy import TEDPolicy
import rasa.model
import rasa.core
import rasa.nlu
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
import rasa.shared.importers.autoconfig as autoconfig
import rasa.shared.utils.io
from rasa.core.agent import Agent
from rasa.core.interpreter import RasaNLUInterpreter
from rasa.nlu.model import Interpreter
from rasa.train import train_core, train_nlu, train, dry_run_result
from rasa.utils.tensorflow.constants import EPOCHS
from tests.conftest import DEFAULT_CONFIG_PATH, DEFAULT_NLU_DATA, AsyncMock
from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE
from tests.test_model import _fingerprint
@pytest.mark.parametrize(
    "parameters",
    [
        {"model_name": "test-1234", "prefix": None},
        {"model_name": None, "prefix": "core-"},
        {"model_name": None, "prefix": None},
    ],
)
@pytest.mark.trains_model
def test_package_model(trained_rasa_model: Text, parameters: Dict):
    """The packaged archive exists and its name reflects model_name/prefix."""
    output_path = tempfile.mkdtemp()
    train_path = rasa.model.unpack_model(trained_rasa_model)
    model_path = rasa.model.package_model(
        _fingerprint(),
        output_path,
        train_path,
        parameters["model_name"],
        parameters["prefix"],
    )
    assert os.path.exists(model_path)
    file_name = os.path.basename(model_path)
    # Name components are only asserted when they were actually requested.
    if parameters["model_name"]:
        assert parameters["model_name"] in file_name
    if parameters["prefix"]:
        assert parameters["prefix"] in file_name
    assert file_name.endswith(".tar.gz")
def count_temp_rasa_files(directory: Text) -> int:
    """Count directory entries that were created by Rasa (not Python/TF)."""

    def _is_rasa_entry(entry: Text) -> bool:
        # Ignore __pycache__ (Python bytecode) and temp .py files created by
        # TF. Anything else is considered to be created by Rasa.
        return not (entry == "__pycache__" or entry.endswith(".py"))

    return sum(1 for entry in os.listdir(directory) if _is_rasa_entry(entry))
@pytest.mark.trains_model
def test_train_temp_files(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_domain_path: Text,
    default_stories_file: Text,
    default_stack_config: Text,
    default_nlu_data: Text,
):
    """Training (and the no-op retrain) leaves no temp files behind."""
    (tmp_path / "training").mkdir()
    (tmp_path / "models").mkdir()
    # Redirect the global temp dir so leftovers are observable and isolated.
    monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training")
    output = str(tmp_path / "models")
    train(
        default_domain_path,
        default_stack_config,
        [default_stories_file, default_nlu_data],
        output=output,
        force_training=True,
    )
    assert count_temp_rasa_files(tempfile.tempdir) == 0
    # After training the model, try to do it again. This shouldn't try to train
    # a new model because nothing has been changed. It also shouldn't create
    # any temp files.
    train(
        default_domain_path,
        default_stack_config,
        [default_stories_file, default_nlu_data],
        output=output,
    )
    assert count_temp_rasa_files(tempfile.tempdir) == 0
@pytest.mark.trains_model
def test_train_core_temp_files(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_domain_path: Text,
    default_stories_file: Text,
    default_stack_config: Text,
):
    """Core-only training leaves no temp files behind."""
    (tmp_path / "training").mkdir()
    (tmp_path / "models").mkdir()
    # Redirect the global temp dir so leftovers are observable and isolated.
    monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training")
    train_core(
        default_domain_path,
        default_stack_config,
        default_stories_file,
        output=str(tmp_path / "models"),
    )
    assert count_temp_rasa_files(tempfile.tempdir) == 0
@pytest.mark.trains_model
def test_train_nlu_temp_files(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_stack_config: Text,
    default_nlu_data: Text,
):
    """NLU-only training leaves no temp files behind."""
    (tmp_path / "training").mkdir()
    (tmp_path / "models").mkdir()
    # Redirect the global temp dir so leftovers are observable and isolated.
    monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training")
    train_nlu(default_stack_config, default_nlu_data, output=str(tmp_path / "models"))
    assert count_temp_rasa_files(tempfile.tempdir) == 0
def test_train_nlu_wrong_format_error_message(
    capsys: CaptureFixture,
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_stack_config: Text,
    incorrect_nlu_data: Text,
):
    """Malformed NLU data produces a data-format hint on stdout."""
    (tmp_path / "training").mkdir()
    (tmp_path / "models").mkdir()
    monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training")
    train_nlu(default_stack_config, incorrect_nlu_data, output=str(tmp_path / "models"))
    captured = capsys.readouterr()
    assert "Please verify the data format" in captured.out
@pytest.mark.trains_model
def test_train_nlu_with_responses_no_domain_warns(tmp_path: Path):
    """Response-selector data without a domain warns about missing responses."""
    data_path = "data/test_nlu_no_responses/nlu_no_responses.yml"
    with pytest.warns(UserWarning) as records:
        train_nlu(
            "data/test_config/config_response_selector_minimal.yml",
            data_path,
            output=str(tmp_path / "models"),
        )
    assert any(
        "You either need to add a response phrase or correct the intent"
        in record.message.args[0]
        for record in records
    )
@pytest.mark.trains_model
def test_train_nlu_with_responses_and_domain_no_warns(tmp_path: Path):
    """Supplying a domain with responses suppresses the missing-response warning."""
    data_path = "data/test_nlu_no_responses/nlu_no_responses.yml"
    domain_path = "data/test_nlu_no_responses/domain_with_only_responses.yml"
    with pytest.warns(None) as records:
        train_nlu(
            "data/test_config/config_response_selector_minimal.yml",
            data_path,
            output=str(tmp_path / "models"),
            domain=domain_path,
        )
    assert not any(
        "You either need to add a response phrase or correct the intent"
        in record.message.args[0]
        for record in records
    )
def test_train_nlu_no_nlu_file_error_message(
    capsys: CaptureFixture,
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_stack_config: Text,
):
    """An empty NLU data path produces a 'No NLU data given' message."""
    (tmp_path / "training").mkdir()
    (tmp_path / "models").mkdir()
    monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training")
    train_nlu(default_stack_config, "", output=str(tmp_path / "models"))
    captured = capsys.readouterr()
    assert "No NLU data given" in captured.out
@pytest.mark.trains_model
def test_trained_interpreter_passed_to_core_training(
    monkeypatch: MonkeyPatch, tmp_path: Path, unpacked_trained_rasa_model: Text
):
    """The freshly-trained NLU interpreter is handed to the Core trainer."""
    # Skip actual NLU training and return trained interpreter path from fixture
    # Patching is bit more complicated as we have a module `train` and function
    # with the same name 😬
    monkeypatch.setattr(
        sys.modules["rasa.train"],
        "_train_nlu_with_validated_data",
        AsyncMock(return_value=unpacked_trained_rasa_model),
    )
    # Mock the actual Core training
    _train_core = AsyncMock()
    monkeypatch.setattr(rasa.core, "train", _train_core)
    train(
        DEFAULT_DOMAIN_PATH_WITH_SLOTS,
        DEFAULT_CONFIG_PATH,
        [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA],
        str(tmp_path),
    )
    _train_core.assert_called_once()
    # Inspect the keyword arguments of the (single) recorded call.
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
@pytest.mark.trains_model
def test_interpreter_of_old_model_passed_to_core_training(
    monkeypatch: MonkeyPatch, tmp_path: Path, trained_rasa_model: Text
):
    """When NLU isn't retrained, the old model's interpreter is reused for Core."""
    # NLU isn't retrained
    monkeypatch.setattr(
        rasa.model.FingerprintComparisonResult,
        rasa.model.FingerprintComparisonResult.should_retrain_nlu.__name__,
        lambda _: False,
    )
    # An old model with an interpreter exists
    monkeypatch.setattr(
        rasa.model, rasa.model.get_latest_model.__name__, lambda _: trained_rasa_model
    )
    # Mock the actual Core training
    _train_core = AsyncMock()
    monkeypatch.setattr(rasa.core, "train", _train_core)
    train(
        DEFAULT_DOMAIN_PATH_WITH_SLOTS,
        DEFAULT_CONFIG_PATH,
        [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA],
        str(tmp_path),
    )
    _train_core.assert_called_once()
    # Inspect the keyword arguments of the (single) recorded call.
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
def test_load_interpreter_returns_none_for_none():
    """_load_interpreter is a no-op for a missing model path."""
    from rasa.train import _load_interpreter
    assert _load_interpreter(None) is None
def test_interpreter_from_previous_model_returns_none_for_none():
    """_interpreter_from_previous_model is a no-op for a missing model path."""
    from rasa.train import _interpreter_from_previous_model
    assert _interpreter_from_previous_model(None) is None
def test_train_core_autoconfig(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_domain_path: Text,
    default_stories_file: Text,
    default_stack_config: Text,
):
    """train_core resolves its configuration via autoconfig, for CORE training."""
    monkeypatch.setattr(tempfile, "tempdir", tmp_path)
    # mock function that returns configuration
    mocked_get_configuration = Mock()
    monkeypatch.setattr(autoconfig, "get_configuration", mocked_get_configuration)
    # skip actual core training
    monkeypatch.setattr(
        sys.modules["rasa.train"], "_train_core_with_validated_data", AsyncMock()
    )
    # do training
    train_core(
        default_domain_path,
        default_stack_config,
        default_stories_file,
        output="test_train_core_temp_files_models",
    )
    mocked_get_configuration.assert_called_once()
    # Second positional argument of get_configuration is the training type.
    _, args, _ = mocked_get_configuration.mock_calls[0]
    assert args[1] == autoconfig.TrainingType.CORE
def test_train_nlu_autoconfig(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_stack_config: Text,
    default_nlu_data: Text,
):
    """train_nlu resolves its configuration via autoconfig, for NLU training."""
    monkeypatch.setattr(tempfile, "tempdir", tmp_path)
    # mock function that returns configuration
    mocked_get_configuration = Mock()
    monkeypatch.setattr(autoconfig, "get_configuration", mocked_get_configuration)
    monkeypatch.setattr(
        sys.modules["rasa.train"], "_train_nlu_with_validated_data", AsyncMock()
    )
    # do training
    train_nlu(
        default_stack_config,
        default_nlu_data,
        output="test_train_nlu_temp_files_models",
    )
    mocked_get_configuration.assert_called_once()
    # Second positional argument of get_configuration is the training type.
    _, args, _ = mocked_get_configuration.mock_calls[0]
    assert args[1] == autoconfig.TrainingType.NLU
def mock_async(monkeypatch: MonkeyPatch, target: Any, name: Text) -> Mock:
    """Patch `target.<name>` with an async wrapper; return the recording Mock."""
    tracker = Mock()

    async def _async_wrapper(*args: Any, **kwargs: Any) -> None:
        tracker(*args, **kwargs)

    monkeypatch.setattr(target, name, _async_wrapper)
    return tracker
def mock_core_training(monkeypatch: MonkeyPatch) -> Mock:
    """Replace `rasa.core.train` with an async mock; return the recording Mock."""
    return mock_async(monkeypatch, rasa.core, rasa.core.train.__name__)
def mock_nlu_training(monkeypatch: MonkeyPatch) -> Mock:
    """Replace `rasa.nlu.train` with an async mock; return the recording Mock."""
    return mock_async(monkeypatch, rasa.nlu, rasa.nlu.train.__name__)
def new_model_path_in_same_dir(old_model_path: Text) -> Text:
    """Return a fresh, randomly-named .tar.gz path next to `old_model_path`."""
    parent = Path(old_model_path).parent
    random_name = secrets.token_hex(8) + ".tar.gz"
    return str(parent / random_name)
class TestE2e:
    """Tests for end-to-end (e2e) training and fingerprint-based retraining."""

    @pytest.mark.trains_model
    def test_e2e_gives_experimental_warning(
        self,
        monkeypatch: MonkeyPatch,
        trained_e2e_model: Text,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
        default_nlu_data: Text,
        caplog: LogCaptureFixture,
    ):
        """Training with e2e stories logs the 'experimental' warning."""
        mock_nlu_training(monkeypatch)
        mock_core_training(monkeypatch)
        with caplog.at_level(logging.WARNING):
            train(
                default_domain_path,
                default_stack_config,
                [default_e2e_stories_file, default_nlu_data],
                output=new_model_path_in_same_dir(trained_e2e_model),
            )
        assert any(
            [
                "The end-to-end training is currently experimental" in record.message
                for record in caplog.records
            ]
        )

    @pytest.mark.trains_model
    def test_models_not_retrained_if_no_new_data(
        self,
        monkeypatch: MonkeyPatch,
        trained_e2e_model: Text,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
        default_nlu_data: Text,
    ):
        """Unchanged training data triggers neither Core nor NLU retraining."""
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        train(
            default_domain_path,
            default_stack_config,
            [default_e2e_stories_file, default_nlu_data],
            output=new_model_path_in_same_dir(trained_e2e_model),
        )
        mocked_core_training.assert_not_called()
        mocked_nlu_training.assert_not_called()

    @pytest.mark.trains_model
    def test_retrains_nlu_and_core_if_new_e2e_example(
        self,
        monkeypatch: MonkeyPatch,
        trained_e2e_model: Text,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
        default_nlu_data: Text,
        tmp_path: Path,
    ):
        """A brand-new e2e user message retrains both NLU and Core."""
        stories_yaml = rasa.shared.utils.io.read_yaml_file(default_e2e_stories_file)
        stories_yaml["stories"][1]["steps"].append({"user": "new message!"})
        new_stories_file = tmp_path / "new_stories.yml"
        rasa.shared.utils.io.write_yaml(stories_yaml, new_stories_file)
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        new_model_path = train(
            default_domain_path,
            default_stack_config,
            [new_stories_file, default_nlu_data],
            output=new_model_path_in_same_dir(trained_e2e_model),
        ).model
        os.remove(new_model_path)
        mocked_core_training.assert_called_once()
        mocked_nlu_training.assert_called_once()

    @pytest.mark.trains_model
    def test_retrains_only_core_if_new_e2e_example_seen_before(
        self,
        monkeypatch: MonkeyPatch,
        trained_e2e_model: Text,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
        default_nlu_data: Text,
        tmp_path: Path,
    ):
        """An e2e message already known to NLU only retrains Core."""
        stories_yaml = rasa.shared.utils.io.read_yaml_file(default_e2e_stories_file)
        stories_yaml["stories"][1]["steps"].append({"user": "Yes"})
        # NOTE(review): was a duplicated `new_stories_file = new_stories_file =`
        # assignment.
        new_stories_file = tmp_path / "new_stories.yml"
        rasa.shared.utils.io.write_yaml(stories_yaml, new_stories_file)
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        new_model_path = train(
            default_domain_path,
            default_stack_config,
            [new_stories_file, default_nlu_data],
            output=new_model_path_in_same_dir(trained_e2e_model),
        ).model
        os.remove(new_model_path)
        mocked_core_training.assert_called_once()
        mocked_nlu_training.assert_not_called()

    def test_nlu_and_core_trained_if_no_nlu_data_but_e2e_stories(
        self,
        monkeypatch: MonkeyPatch,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
        tmp_path: Path,
    ):
        """E2e stories alone are enough to trigger both trainings."""
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        output = self.make_tmp_model_dir(tmp_path)
        train(
            default_domain_path,
            default_stack_config,
            [default_e2e_stories_file],
            output=output,
        )
        mocked_core_training.assert_called_once()
        mocked_nlu_training.assert_called_once()

    @staticmethod
    def make_tmp_model_dir(tmp_path: Path) -> Text:
        """Create and return a fresh models directory under `tmp_path`."""
        (tmp_path / "models").mkdir()
        output = str(tmp_path / "models")
        return output

    @pytest.mark.trains_model
    def test_new_nlu_data_retrains_core_if_there_are_e2e_stories(
        self,
        monkeypatch: MonkeyPatch,
        trained_e2e_model: Text,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
        default_nlu_data: Text,
        tmp_path: Path,
    ):
        """With e2e stories present, new NLU data also retrains Core."""
        nlu_yaml = rasa.shared.utils.io.read_yaml_file(default_nlu_data)
        nlu_yaml["nlu"][0]["examples"] += "- surprise!\n"
        new_nlu_file = tmp_path / "new_nlu.yml"
        rasa.shared.utils.io.write_yaml(nlu_yaml, new_nlu_file)
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        new_model_path = train(
            default_domain_path,
            default_stack_config,
            [default_e2e_stories_file, new_nlu_file],
            output=new_model_path_in_same_dir(trained_e2e_model),
        ).model
        os.remove(new_model_path)
        mocked_core_training.assert_called_once()
        mocked_nlu_training.assert_called_once()

    @pytest.mark.trains_model
    def test_new_nlu_data_does_not_retrain_core_if_there_are_no_e2e_stories(
        self,
        monkeypatch: MonkeyPatch,
        trained_simple_rasa_model: Text,
        default_domain_path: Text,
        default_stack_config: Text,
        simple_stories_file: Text,
        default_nlu_data: Text,
        tmp_path: Path,
    ):
        """Without e2e stories, new NLU data retrains NLU only."""
        nlu_yaml = rasa.shared.utils.io.read_yaml_file(default_nlu_data)
        nlu_yaml["nlu"][0]["examples"] += "- surprise!\n"
        new_nlu_file = tmp_path / "new_nlu.yml"
        rasa.shared.utils.io.write_yaml(nlu_yaml, new_nlu_file)
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        new_model_path = train(
            default_domain_path,
            default_stack_config,
            [simple_stories_file, new_nlu_file],
            output=new_model_path_in_same_dir(trained_simple_rasa_model),
        ).model
        os.remove(new_model_path)
        mocked_core_training.assert_not_called()
        mocked_nlu_training.assert_called_once()

    def test_training_core_with_e2e_fails_gracefully(
        self,
        capsys: CaptureFixture,
        monkeypatch: MonkeyPatch,
        tmp_path: Path,
        default_domain_path: Text,
        default_stack_config: Text,
        default_e2e_stories_file: Text,
    ):
        """`rasa train core` on e2e stories trains nothing and explains why."""
        mocked_nlu_training = mock_nlu_training(monkeypatch)
        mocked_core_training = mock_core_training(monkeypatch)
        output = self.make_tmp_model_dir(tmp_path)
        train_core(
            default_domain_path,
            default_stack_config,
            default_e2e_stories_file,
            output=output,
        )
        mocked_core_training.assert_not_called()
        mocked_nlu_training.assert_not_called()
        captured = capsys.readouterr()
        assert (
            "Stories file contains e2e stories. "
            "Please train using `rasa train` so that the NLU model is also trained."
        ) in captured.out
@pytest.mark.timeout(300)
@pytest.mark.parametrize("use_latest_model", [True, False])
@pytest.mark.trains_model
def test_model_finetuning(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_domain_path: Text,
    default_stories_file: Text,
    default_stack_config: Text,
    default_nlu_data: Text,
    trained_rasa_model: Text,
    use_latest_model: bool,
):
    """Finetuning passes loaded Agent/Interpreter objects to both trainers."""
    # Wrap (not replace) the real trainers so their call args can be inspected.
    mocked_nlu_training = Mock(wraps=rasa.nlu.train)
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)
    mocked_core_training = Mock(wraps=rasa.core.train)
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training)
    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")
    if use_latest_model:
        # Passing a directory makes train() pick the latest model inside it.
        trained_rasa_model = str(Path(trained_rasa_model).parent)
    train(
        default_domain_path,
        default_stack_config,
        [default_stories_file, default_nlu_data],
        output=output,
        force_training=True,
        model_to_finetune=trained_rasa_model,
        finetuning_epoch_fraction=0.1,
    )
    mocked_core_training.assert_called_once()
    _, kwargs = mocked_core_training.call_args
    assert isinstance(kwargs["model_to_finetune"], Agent)
    mocked_nlu_training.assert_called_once()
    _, kwargs = mocked_nlu_training.call_args
    assert isinstance(kwargs["model_to_finetune"], Interpreter)
@pytest.mark.timeout(300)
@pytest.mark.parametrize("use_latest_model", [True, False])
@pytest.mark.trains_model
def test_model_finetuning_core(
tmp_path: Path,
monkeypatch: MonkeyPatch,
trained_moodbot_path: Text,
use_latest_model: bool,
):
mocked_core_training = AsyncMock()
monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training)
mock_agent_load = Mock(wraps=Agent.load)
monkeypatch.setattr(Agent, "load", mock_agent_load)
(tmp_path / "models").mkdir()
output = str(tmp_path / "models")
if use_latest_model:
trained_moodbot_path = str(Path(trained_moodbot_path).parent)
# Typically models will be fine-tuned with a | |
#!/usr/bin/env python
import numpy as np
import sys
from warnings import warn
class Vertex(object):
    """
    A named point in 3D Cartesian space.

    Attributes
    ----------
    x: float
        x coordinate
    y: float
        y coordinate
    z: float
        z coordinate
    coordinates: np array
        (x, y, z) coordinates
    name: string
        single letter name, for example 'A'
    """
    def __init__(self, x, y, z, name):
        """
        Store the Cartesian coordinates and the label of the vertex.

        Parameters
        ----------
        x, y, z: float
            Cartesian coordinates
        name: string
            single letter name, for example 'A'
        """
        self.x = x
        self.y = y
        self.z = z
        self.coordinates = np.array((x, y, z))
        self.name = name
class Face(object):
    """
    Face of a polyhedron, defined by its vertices.

    Attributes
    ----------
    vertices: list
        list of Vertex objects confining the face
    numVertices: int
        number of vertices confining the face
    ID: int
        number identifying the Face
    middle: np array
        middle of the face; also origin of the local coordinate system
    normal: np array
        normal vector to the face
    u: np array
        first axis of the local coordinate system (vertex 0 -> vertex 1)
    v: np array
        second axis of the local coordinate system (vertex 0 -> vertex 2)
    angle: float
        angle between u and v
    """
    def __init__(self, vertices, ID):
        """
        Creates a Face object from a set of vertices.

        Parameters
        ----------
        vertices: list
            list of Vertex objects confining the face
        ID: int
            number identifying the Face

        Raises
        ------
        ValueError
            if fewer than three vertices are given
        """
        self.vertices = vertices
        self.numVertices = len(self.vertices)
        self.ID = ID
        # A face needs at least three vertices to define a plane.
        # NOTE(review): was assert + print + sys.exit(1); raising is kinder to
        # library users and is not stripped under `python -O`.
        if self.numVertices < 3:
            raise ValueError("Can not create face with less than three vertices.")
        self.calc_system()

    def calc_system(self):
        """
        Calculates middle of the face, normal vector
        and local coordinate system
        """
        # middle as average of the vertices
        # NOTE(review): was np.zeros(self.numVertices), which only worked for
        # triangular faces by coincidence; the middle is always a 3D point.
        self.middle = np.zeros(3)
        for v in self.vertices:
            self.middle += v.coordinates
        self.middle = self.middle / self.numVertices
        # normal is in the direction of the origin through the middle
        # (assumes the polyhedron is centered on the origin)
        self.normal = self.middle / np.linalg.norm(self.middle)
        # pick first axis from v1 to v2, second from v1 to v3
        self.u = self.vertices[1].coordinates - self.vertices[0].coordinates
        self.v = self.vertices[2].coordinates - self.vertices[0].coordinates
        self.angle = np.arccos(np.dot(self.u, self.v) /
                               (np.linalg.norm(self.u) * np.linalg.norm(self.v)))

    def calc_local_vertices(self):
        """
        Get position of the vertices of the face in the local coordinate system.
        Only works for (triangular) icosahedron faces; hard coded maths.

        Returns
        -------
        list
            list of 2D arrays of the coordinates of the vertices in the local
            coordinate system
        """
        # D is the middle-to-vertex distance (same for every vertex of an
        # equilateral face).
        D = np.linalg.norm(self.middle - self.vertices[0].coordinates)
        v1 = np.array([D, 0])
        v2 = np.array([-D*np.cos(self.angle), -D*np.sin(self.angle)])
        v3 = np.array([-D*np.cos(self.angle), D*np.sin(self.angle)])
        return [v1, v2, v3]

    def global_to_lcs(self, point):
        """
        Convert a point on the face in global (3D) coordinates to the local
        coordinate system of the face (2D)

        Parameters
        ----------
        point: array-like
            3D global coordinates of a point on the face

        Returns
        -------
        tuple: (a, b) coefficients of the point along u and v, measured from
        vertex 0 of the face
        """
        eps = 1.0e-6
        # check that point is in the plane of the face
        # NOTE(review): was a signed comparison, which let points below the
        # plane slip through; compare the absolute distance instead.
        assert(abs(np.dot((point - self.middle), self.normal)) < eps)
        start_vector = self.vertices[0].coordinates
        p = point - start_vector
        psize = np.linalg.norm(p)
        size = np.linalg.norm(self.u)
        # calculate angles between p and basis vectors
        # check that total angle is the same as calculated when initializing
        cos_angle_u = np.dot(self.u, p) / (psize * size)
        cos_angle_v = np.dot(self.v, p) / (psize * size)
        tot_angle = np.arccos(cos_angle_u) + np.arccos(cos_angle_v)
        cos_tangle = np.cos(self.angle)
        assert(abs(np.cos(tot_angle) - cos_tangle) < eps)
        # invert p = a*u + b*v for (a, b); assumes |u| == |v|
        factor = psize / (1 - cos_tangle**2.0)
        a = factor * (cos_angle_u - np.cos(self.angle)*cos_angle_v) / size
        b = factor * (cos_angle_v - np.cos(self.angle)*cos_angle_u) / size
        # check the lcs coordinates are within the triangle face
        assert((a >= 0. or np.isclose(a, 0.)) and (b >= 0. or np.isclose(b,0.)))
        assert((a + b) <= 1.0 or np.isclose(a+b, 1.0))
        return (a, b)

    def lcs_to_global(self, point):
        """
        Convert point in lcs to global (3D) coordinates.

        NOTE(review): global_to_lcs measures (a, b) from vertex 0, but this
        method originally offset them from self.middle, so the two were not
        inverses of each other; vertex 0 is now used as the origin.
        """
        p = self.vertices[0].coordinates + point[0]*self.u + point[1]*self.v
        return p

    def lcs_to_net(self, point, v1net, v2net, v3net, scale=1):
        """
        Convert coordinates in lcs to coordinates on the icosahedron net grid

        Parameters
        ----------
        point: array-like
            2D coordinates of a point in the lcs of the face
        v1net, v2net, v3net: array-like
            2D coordinates of the vertices of the face on the net
        scale: int or float
            scale used in the icosahedron net
            default = 1

        Returns
        -------
        NumPy Array: coordinates of the point on the net
        """
        v1net = np.array(v1net)
        v2net = np.array(v2net)
        v3net = np.array(v3net)
        # corresponding u and v vectors form the face lcs on the net
        unet = v2net - v1net
        vnet = v3net - v1net
        # coordinates of the point on the net
        pnet = v1net + point[0]*unet + point[1]*vnet
        return pnet
class Icosahedron(object):
    """
    Icosahedron (with 20 faces).
    """
    def __init__(self):
        """Build the 12 vertices and 20 triangular faces of the icosahedron."""
        golden = (1. + np.sqrt(5)) / 2.0
        # All of our vertices for the icosahedron: cyclic permutations of
        # (0, +-1, +-golden), i.e. corners of three orthogonal golden rectangles.
        A = Vertex(golden, 1, 0, "A")
        B = Vertex(-golden, 1, 0, "B")
        C = Vertex(golden, -1, 0, "C")
        D = Vertex(-golden, -1, 0, "D")
        E = Vertex(1, 0, golden, "E")
        F = Vertex(-1, 0, golden, "F")
        G = Vertex(1, 0, -golden, "G")
        H = Vertex(-1, 0, -golden, "H")
        I = Vertex(0, golden, 1, "I")
        J = Vertex(0, -golden, 1, "J")
        K = Vertex(0, golden, -1, "K")
        L = Vertex(0, -golden, -1, "L")
        self.vertices = [A, B, C, D, E, F, G, H, I, J, K, L]
        # All of our Faces for the icosahedron
        F1 = Face([H, L, D], 1)
        F2 = Face([H, D, B], 2)
        F3 = Face([H, B, K], 3)
        F4 = Face([H, K, G], 4)
        F5 = Face([H, G, L], 5)
        F6 = Face([C, L, G], 6)
        F7 = Face([G, A, C], 7)
        F8 = Face([A, G, K], 8)
        F9 = Face([K, I, A], 9)
        F10 = Face([I, K, B], 10)
        F11 = Face([B, F, I], 11)
        F12 = Face([F, B, D], 12)
        F13 = Face([D, J, F], 13)
        F14 = Face([J, D, L], 14)
        F15 = Face([L, C, J], 15)
        F16 = Face([E, J, C], 16)
        F17 = Face([E, C, A], 17)
        F18 = Face([E, A, I], 18)
        F19 = Face([E, I, F], 19)
        F20 = Face([E, F, J], 20)
        self.faces = [F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12,
                      F13, F14, F15, F16, F17, F18, F19, F20]
    def pick_face(self, p):
        """
        Pick which face a point p should be projected on
        Parameters
        ----------
        p: array-like
            point p in theta, phi coordinates
        Returns
        -------
        Face: The face p is to be projected on
        """
        # Linear search: the face whose middle is angularly closest to p.
        lowest = np.pi*2
        closest_face = self.faces[0]
        for f in self.faces:
            pmiddle = point_to_sphere(f.middle)
            dist = ang_distance(p, pmiddle)
            if dist < lowest:
                lowest = dist
                closest_face = f
        return closest_face
    def project_in_3D(self, p):
        """
        Project a point p onto one of the faces of the icosahedron.
        Parameters
        ----------
        p: array-like
            point p in theta, phi coordinates
        Returns
        -------
        Face: Face the point was projected on
        NumPy Array: (x, y, z) coodinates of the projected point
            in global coordinates
        """
        face = self.pick_face(p)
        # projected point in the intersection of a ray from the origin through
        # the point in spherical coordinates and the plane of the face
        pcart = sphere_to_cart(p)
        projp = (np.dot(face.middle, face.normal) /
                 np.dot(pcart, face.normal)) * pcart
        return face, projp
    def project_in_lcs(self, p):
        """
        Project a point onto one of the faces of the icosahedron.
        Parameters
        ----------
        p: array-like
            point p in theta, phi coordinates
        Returns
        -------
        Face: Face the point was projected on
        NumPy Array: (x, y) coordinates of the projected point
            in the lcs of the face
        """
        face, projp3D = self.project_in_3D(p)
        return face, face.global_to_lcs(projp3D)
def ang_distance(p1, p2):
"""
Calculate angular distance between two point on the 1-sphere p1 and p2.
Parameters
----------
p1: array-like
point 1 in theta, phi coordinates
p2: array-like
| |
from gmspython import *
import gams_production,global_settings
class pr_static(gmspython):
	def __init__(self,nt=None,pickle_path=None,work_folder=None,pw='pricewedge',kwargs_ns={},**kwargs_gs):
		"""Set up a static production module from a nesting tree `nt`, or restore a pickled state.

		NOTE(review): kwargs_ns={} is a shared mutable default — safe only if it
		is never mutated; confirm against namespace_global_sets/variables.
		"""
		databases = None if nt is None else [nt.database.copy()]
		super().__init__(module='pr_static',pickle_path=pickle_path,work_folder=work_folder,databases=databases,**kwargs_gs)
		if pickle_path is None:
			# Fresh initialization from the nesting tree (not unpickled):
			self.version = nt.version
			self.ns = {**self.ns, **self.namespace_global_sets(nt,kwargs_ns), **self.namespace_global_variables(kwargs_ns)}
			self.ns_local = {**self.ns_local, **self.namespace_local_sets(nt)}
			# Price-wedge specification, looked up by name in gams_production.
			self.pw = getattr(gams_production,pw)()
			# Merge every tree's database into the model database ('first' wins).
			for tree in nt.trees.values():
				DataBase.GPM_database.merge_dbs(self.model.database,tree.database,'first')
			self.add_default_subsets()
# --- 1: Retrieve namespace from nesting trees --- #
	def namespace_global_sets(self,nt,kwargs):
		""" retrieve attributes from global tree"""
		# Copy the standard set attributes that the tree actually defines.
		std_sets = {setname: getattr(nt,setname) for setname in ('n','nn','nnn','inp','out','int','wT','map_all','kno_out','kno_inp','s') if setname in nt.__dict__}
		# Side effect: remember whether the model distinguishes sectors ('s').
		self.sector = True if hasattr(nt,'s') else False
		# Domain of PwT depends on the model version.
		std_sets['PwT_dom'] = nt.PwT_dom if self.version=='Q2P' else nt.wT
		# Names below may be overridden via kwargs (df falls back to defaults).
		std_sets['exo_mu'] = df('exo_mu',kwargs)
		std_sets['endo_PbT'] = df('endo_PbT',kwargs)
		std_sets['n_out'] = df('n_out',kwargs)
		if self.sector is not False:
			std_sets['s_prod'] = df('s_prod',kwargs)
		return std_sets
def add_default_subsets(self):
self.model.database[self.n('n_out')] = self.get('out').levels[-1] if isinstance(self.get('out'),pd.MultiIndex) else self.get('out')
if self.sector is not False:
self.model.database[self.n('s_prod')] = self.get('out').levels[0]
def namespace_global_variables(self,kwargs):
"""create global namespace for variables used in partial equilibrium model. kwargs modify the names."""
return {varname: df(varname,kwargs) for varname in self.default_variables}
@property
def default_variables(self):
return ('PwT','PbT','qS','qD','mu','sigma','eta','Peq','markup','tauS','tauLump')
def namespace_local_sets(self,nt):
"""create namespace for each tree, by copying attributes."""
return {tree: {attr: nt.trees[tree].__dict__[attr] for attr in nt.trees[tree].__dict__ if attr not in set(['tree','database']).union(nt.prune_trees)} for tree in nt.trees}
# --- 2: Initialize variables --- #
# Note: The method 'intialiaze_variables' should be provided; however, how this works is optional.
def initialize_variables(self,**kwargs):
try:
if kwargs['check_variables'] is True:
for var in self.default_variables:
if self.ns[var] not in self.model.database.symbols:
self.model.database[self.ns[var]] = self.default_var_series(var)
else:
self.model.database[self.ns[var]].vals = DataBase.merge_symbols(self.get(var),self.default_var_series(var))
except KeyError:
for var in self.default_variables:
if self.ns[var] not in self.model.database.symbols:
self.model.database[self.ns[var]] = self.default_var_series(var)
if self.ns['exo_mu'] not in self.model.database.symbols:
self.add_calibration_subsets()
if self.state == 'calibrate':
self.model.settings.set_conf('solve',self.add_solve)
def default_var_series(self,var):
if var=='PbT':
return pd.Series(1, index = self.get('out'), name = self.n(var))
elif var == 'PwT':
return pd.Series(1, index = self.get('PwT_dom'), name = self.n(var))
elif var == 'qS':
return pd.Series(1, index = self.get('out'), name = self.n(var))
elif var == 'qD':
return pd.Series(1, index = self.get('wT'), name = self.n(var))
elif var == 'mu':
return pd.Series(1, index = self.get('map_all'), name=self.n(var))
elif var == 'sigma':
return pd.Series(1, index = self.get('kno_inp'), name = self.n(var))
elif var == 'eta':
return pd.Series(-1, index = self.get('kno_out'), name = self.n(var))
elif var == 'Peq':
return pd.Series(1, index = self.get('n_out'), name = self.n(var))
elif var == 'markup':
return pd.Series(0, index = self.get('out'), name = self.n(var))
elif var == 'tauS':
return pd.Series(0, index = self.get('out'), name = self.n(var))
elif var == 'tauLump':
return 0 if self.sector is False else pd.Series(0, index = self.get('s_prod'), name = self.n(var))
def add_calibration_subsets(self):
(self.model.database[self.ns['endo_PbT']],self.model.database[self.ns['exo_mu']]) = self.calib_subsets
@property
def calib_subsets(self):
endo_pbt, exo_mu = empty_index(self.get('out')),empty_index(self.get('map_all'))
for tree in self.ns_local:
if self.n('type_io',tree=tree)=='input':
endo_pbt = endo_pbt.union(self.get('tree_out',tree=tree))
map_ = self.get('map_',tree=tree)
exo_mu = exo_mu.union(map_[(map_.droplevel(self.n('nn')).isin(self.get('int')))])
elif self.n('type_io',tree=tree)=='output':
map_ = self.get('map_',tree=tree)
tree_out = self.get('tree_out',tree=tree)
for x in self.get('knots',tree=tree):
z = map_[(map_.droplevel(self.n('n')).isin([x])) & (map_.droplevel(self.n('nn')).isin(tree_out))]
if not z.empty:
endo_pbt = endo_pbt.insert(0,z.droplevel(self.n('nn'))[0])
exo_mu = exo_mu.insert(0,z[0])
exo_mu = exo_mu.union(map_[~(map_.droplevel(self.n('nn')).isin(tree_out))])
return endo_pbt,exo_mu
# --- 3: Define groups --- #
def group_conditions(self,group):
if group == 'g_tech_exo':
return [{'sigma': self.g('kno_inp'), 'eta': self.g('kno_out'), 'mu': self.g('exo_mu')}]
elif group == 'g_tech_endo':
return [{'mu': {'and': [self.g('map_all'), {'not': self.g('exo_mu')}]}, 'markup': self.g('out')}]
elif group == 'g_endovars':
return [{'PwT': self.g('int'), 'qD': self.g('int'), 'PbT': self.g('endo_PbT')}]
elif group == 'g_exovars':
return [{'PwT': self.g('inp'), 'qS': self.g('out'),'tauS': self.g('out'), 'tauLump': None if self.sector is False else self.g('s_prod')}]
elif group == 'g_calib_exo':
return [{'qD': self.g('inp'), 'PbT': {'and': [self.g('out'), {'not': self.g('endo_PbT')}]}, 'Peq': self.g('n_out')}]
elif group == 'g_tech':
return ['g_tech_endo','g_tech_exo']
@property
def exo_groups(self):
""" Collect exogenous groups """
n = self.model.settings.name+'_'
if self.state=='B':
return {n+g: self.add_group(g,n=n) for g in ('g_tech','g_exovars')}
elif self.state in ('SC','DC'):
return {n+g: self.add_group(g,n=n) for g in ('g_tech_exo','g_exovars','g_calib_exo')}
@property
def endo_groups(self):
""" Collect endogenous groups """
n = self.model.settings.name+'_'
if self.state=='B':
return {n+g: self.add_group(g,n=n) for g in ('g_endovars','g_calib_exo')}
elif self.state in ('SC','DC'):
return {n+g: self.add_group(g,n=n) for g in ('g_endovars','g_tech_endo')}
@property
def sub_groups(self):
""" Collect groups that are subgroups of other groups; these are not written to list of exogenous/endogenous groups. """
n = self.model.settings.name+'_'
return {n+g: self.add_group(g,n=n) for g in ('g_tech_endo','g_tech_exo')}
@property
def add_solve(self):
if self.state == 'calibrate':
return f"""solve {self.model.settings.get_conf('name')} using NLP min {self.g('obj').write()}"""
else:
return None
# --- 4: Define blocks --- #
@property
def blocktext(self):
return {**{f"M_{tree}": self.eqtext(tree) for tree in self.ns_local},**{f"M_{self.model.settings.name}_pw":self.init_pw()}}
@property
def mblocks(self):
return set([f"M_{tree}" for tree in self.ns_local]+[f"M_{self.model.settings.name}_pw"])
def init_pw(self):
self.pw.add_symbols(self.model.database,self.ns)
self.pw.add_conditions(self.model.database)
return self.pw.run(self.model.settings.name)
def eqtext(self,tree_name):
tree = self.ns_local[tree_name]
gams_class = getattr(gams_production,tree['type_f'])(version=tree['version'])
gams_class.add_symbols(self.model.database,tree,ns_global=self.ns)
gams_class.add_conditions(self.model.database,tree)
return gams_class.run(tree_name)
# --- 5: Special run methods --- #
# --- 6: Add sector --- #
def add_sector(self,s,add_to_existing_s=False,excep_global = ['n_out'],local_exceptions = {},**kwargs):
self.add_sector_to_namespace(**kwargs)
self.s = s
self.add_sector_to_sets(add_to_existing=add_to_existing_s)
self.add_sector_to_subsets(exceptions=excep_global)
self.add_sector_to_variables(exceptions=excep_global)
for tree in self.ns_local:
self.add_sector_to_local(tree,exceptions=local_exceptions[tree] if tree in local_exceptions else [])
def add_sector_to_namespace(self,**kwargs):
self.ns.update({set_: df(set_,kwargs) for set_ in ['s','s_prod']})
def add_sector_to_sets(self,add_to_existing=False):
if self.ns['s'] not in self.model.database.symbols:
self.model.database[self.ns['s']] = pd.Index([self.s],name=self.ns['s'])
elif add_to_existing is True:
self.model.database[self.ns['s']].vals = self.model.database[self.ns['s']].vals.union(pd.Index([self.s],name=self.ns[s]))
def add_sector_to_local(self,tree,exceptions=[]):
ste = ['name','type_io','version','temp_namespace','type_f']
[self.add_sector_to_subset(ss) for ss in set(self.dvbk(self.ns_local[tree],exceptions+ste)).intersection(set(self.model.database.sets['subsets']+self.model.database.sets['mappings']))];
[self.add_sector_to_variable(var) for var in set(self.dvbk(self.ns_local[tree],exceptions+ste)).intersection(set(self.model.database.variables_flat+self.model.database.parameters_flat))];
def dvbk(self,obj,exceptions):
return [v for k,v in obj.items() if k not in exceptions]
def add_sector_to_subsets(self,exceptions=['n_out']):
[self.add_sector_to_subset(ss) for ss in set(self.dvbk(self.ns,exceptions)).intersection(set(self.model.database.sets['subsets']+self.model.database.sets['mappings']))];
def add_sector_to_variables(self,exceptions=[]):
[self.add_sector_to_variable(var) for var in set(self.dvbk(self.ns,exceptions)).intersection(set(self.model.database.variables_flat+self.model.database.parameters_flat))];
def add_sector_to_variable(self,var):
db =self.model.database
if db[var].gtype in ('scalar_variable','scalar_parameter'):
gtype = db[var].gtype
db[var] = pd.Series(db[var],index=self.get('s')[self.get('s')==self.s],name=var)
db[var].gtype = gtype.split('_')[-1]
elif self.ns['s'] not in db[var].domains:
db[var].vals.index = DataBase_wheels.prepend_index_with_1dindex(db[var].index,self.get('s')[self.get('s')==self.s])
def add_sector_to_subset(self,subset):
db =self.model.database
if self.ns['s'] not in db[subset].domains:
db[subset] = DataBase_wheels.prepend_index_with_1dindex(db.get(subset),self.get('s')[self.get('s')==self.s])
class pr_dynamic(gmspython):
    # Dynamic counterpart of pr_static: adds a time dimension, durables and an
    # installation-cost module (ict) on top of the nesting-tree production setup.
    def __init__(self,nt=None,pickle_path=None,work_folder=None,ict='ict_v1',gs_v='gs_v1',pw='pricewedge',kwargs_ns={},kwargs_st={},gs_vals={},**kwargs_gs):
        """Build namespaces, attach the ict/pw modules and merge tree databases.

        When pickle_path is given, nt is ignored and state is restored by the
        superclass instead of being rebuilt here.
        """
        databases = None if nt is None else [nt.database.copy()]
        super().__init__(module='pr_dynamic',pickle_path=pickle_path,work_folder=work_folder,databases=databases,**kwargs_gs)
        if pickle_path is None:
            self.version = nt.version
            self.ns = {**self.ns, **self.namespace_global_sets(nt,kwargs_ns), **self.namespace_global_variables(kwargs_ns)}
            self.ns_local = {**self.ns_local, **self.namespace_local_sets(nt)}
            self.add_global_settings(gs_v,kwargs_ns=kwargs_ns,kwargs_vals=gs_vals,dynamic=True)
            # installation-cost and price-wedge equation writers, chosen by name
            self.ict = getattr(gams_production,ict)(**kwargs_ns)
            self.pw = getattr(gams_production,pw)()
            for tree in nt.trees.values():
                DataBase.GPM_database.merge_dbs(self.model.database,tree.database,'first')
            if 'ss' in kwargs_st:
                # kwargs_st['ss'] lists the sectors covered by the ict module
                self.sector = True
                self.add_sector_ict(kwargs_st['ss'],**kwargs_ns)
            self.add_default_subsets()
# --- 1: Retrieve namespace from nesting trees --- #
def namespace_global_sets(self,nt,kwargs):
    """ retrieve attributes from global tree"""
    # Copy the standard set attributes that the tree actually defines.
    std_sets = {setname: getattr(nt,setname) for setname in ('n','nn','nnn','inp','out','int','wT','map_all','kno_out','kno_inp','s') if setname in nt.__dict__}
    # The PwT domain is version-dependent ('Q2P' keeps a dedicated domain).
    std_sets['PwT_dom'] = nt.PwT_dom if self.version=='Q2P' else nt.wT
    # Calibration subsets get their (possibly renamed) symbol names via df().
    # NOTE(review): unlike pr_static, self.sector is NOT set here — it is set
    # in __init__ from kwargs_st; confirm this asymmetry is intended.
    std_sets['exo_mu'] = df('exo_mu',kwargs)
    std_sets['endo_PbT'] = df('endo_PbT',kwargs)
    std_sets['n_out'] = df('n_out',kwargs)
    return std_sets
def add_sector_set(self,set_,index):
    """ Add the sector index 's_prod' to a MultiIndex 'set_' at the index'th place. """
    # Prepend the 'ss' sector level, then reorder so the sector level sits at
    # position `index` of the resulting MultiIndex. No-op for non-sector models.
    return set_ if self.sector is False else DataBase_wheels.prepend_index_with_1dindex(set_,self.get('ss')).reorder_levels(set_.names[0:index]+self.g('ss').domains+set_.names[index:])
def add_sector_set_from_product(self,list_of_sets,index):
    """Cartesian product of the given sets; in sector models the 'ss' sector
    set is spliced in at position `index` before taking the product."""
    if self.sector is False:
        return pd.MultiIndex.from_product(list_of_sets)
    with_sector = list_of_sets[0:index] + [self.get('ss')] + list_of_sets[index:]
    return pd.MultiIndex.from_product(with_sector)
def add_default_subsets(self):
    # n_out: the set of produced goods; for sector models 'out' is a
    # MultiIndex and the goods level is the last one.
    self.model.database[self.n('n_out')] = self.get('out').levels[-1] if isinstance(self.get('out'),pd.MultiIndex) else self.get('out')
def namespace_global_variables(self,kwargs):
    """create global namespace for variables used in partial equilibrium model. kwargs modify the names."""
    names = {}
    for varname in self.default_variables:
        names[varname] = df(varname,kwargs)
    return names
@property
def default_variables(self):
    # Static production variables plus the dynamic ones: interest rate (Rrate)
    # and depreciation rate (rDepr).
    return ('PwT','PbT','qS','qD','mu','sigma','eta','Peq','markup','tauS','tauLump','Rrate','rDepr')
def namespace_local_sets(self,nt):
    """create namespace for each tree, by copying attributes."""
    # attributes that are never copied into the local namespace
    skip = set(['tree','database']).union(nt.prune_trees)
    local_ns = {}
    for tree_name, tree in nt.trees.items():
        local_ns[tree_name] = {attr: val for attr, val in tree.__dict__.items() if attr not in skip}
    return local_ns
def add_sector_ict(self,sector,**kwargs):
    """ add the subset of sectors for which we apply the installation cost module. The sectoral set 's' must be included."""
    # 'ss' defaults to the s_prod symbol name unless overridden via kwargs
    self.ns['ss'] = 's_prod' if 'ss' not in kwargs else kwargs['ss']
    # restrict to the sectors actually listed in `sector`
    self.model.database[self.ns['ss']] = self.get('s')[self.get('s').isin(sector)];
    self.ict.sector = self.ns['ss']
# --- 2: Initialize methods --- #
def initialize_variables(self,**kwargs):
    """Ensure all default variables exist in the database.

    With check_variables=True, defaults are also merged into pre-existing
    symbols; without the kwarg, only missing symbols are added.
    """
    try:
        if kwargs['check_variables'] is True:
            for var in self.default_variables:
                if self.ns[var] not in self.model.database.symbols:
                    self.model.database[self.ns[var]] = self.default_var_series(var)
                else:
                    # keep existing values, fill gaps with defaults
                    self.model.database[self.ns[var]].vals = DataBase.merge_symbols(self.get(var),self.default_var_series(var))
    except KeyError:
        # 'check_variables' not supplied: only add missing symbols.
        # NOTE(review): this except also swallows any KeyError raised inside
        # the loop above (e.g. a missing self.ns entry) — confirm intended.
        for var in self.default_variables:
            if self.ns[var] not in self.model.database.symbols:
                self.model.database[self.ns[var]] = self.default_var_series(var)
    if self.ns['exo_mu'] not in self.model.database.symbols:
        self.add_calibration_subsets()
    self.initialize_ict()
# --- 2.1: Add time to the model durables --- #
def add_dur(self,dur,dur2inv=None,kwargs_ns={},**kwargs):
    """ Add the time index and durables to model. """
    # first extend the namespace with the durable set names, then add the
    # actual sets/variables to the database
    self.add_dur_to_namespace(**kwargs_ns)
    self.add_durables_to_database(dur,dur2inv=dur2inv)
def ivfs(self,static,variables=['qS','qD','PwT','PbT','Peq','tauS','tauLump'],merge=True):
    """ initialize variables from database w. static version """
    for var in variables:
        if var not in ('qD','PwT'):
            # repeat the static solution over all non-terminal periods (txE)
            add_var = DataBase_wheels.repeat_variable_windex(static.get(self.ns[var]),self.get('txE'))
        else:
            # qD/PwT: the durable part is repeated over the full horizon (t),
            # so durables also get values in the terminal period
            ndurs = DataBase_wheels.repeat_variable_windex(static.get(self.ns[var]),self.get('txE'))
            durs = DataBase_wheels.repeat_variable_windex(static.get(self.ns[var])[static.get(self.ns[var]).index.get_level_values(self.ns['n']).isin(self.get('dur'))],self.get('t'))
            add_var = ndurs.combine_first(durs)
        if merge is True and self.ns[var] in self.model.database.symbols:
            # existing values take precedence over the static-derived ones
            self.model.database[self.ns[var]] = add_var.combine_first(self.get(var))
        else:
            self.model.database[self.ns[var]] = add_var
def add_dur_to_namespace(self,**kwargs):
    """ Add the durable set names ('dur','ndur','inv','dur2inv') to the namespace; kwargs rename them. """
    self.ns.update({set_: df(set_,kwargs) for set_ in self.time_ns})
@property
def time_ns(self):
    # set names introduced by the durable/time extension:
    # durables, non-durables, investment goods, durable->investment mapping
    return ('dur','ndur','inv','dur2inv')
def add_durables_to_database(self,dur,dur2inv=None):
    """Add durable sets, adjust the affected subsets, and (re)initialize any
    default variables that are still missing after the set changes."""
    self.add_durable_sets_to_database(dur,dur2inv=dur2inv)
    self.adjust_subsets_to_durables()
    for var in self.default_variables:
        if self.ns[var] not in self.model.database.symbols:
            self.model.database[self.ns[var]] = self.default_var_series(var)
def adjust_subsets_to_durables(self):
    """ add investment goods to inputs, move durables to intermediate goods."""
    # for sector models, select the (sector, durable) pairs from 'inp';
    # otherwise the plain durable set is used directly
    dur = self.get('inp')[self.get('inp').get_level_values(self.ns['n']).isin(self.get('dur'))] if self.sector is True else self.get('dur')
    # map each durable to its investment good via dur2inv
    inv = DataBase_wheels.mi.map_v1(dur,self.get('dur2inv')) if self.sector is True else self.get('inv')
    self.model.database[self.ns['inp']] = self.get('inp').drop(dur).union(inv)
    self.model.database[self.ns['wT']] = self.get('wT').union(inv)
    self.model.database[self.ns['PwT_dom']] = self.get('PwT_dom').union(inv)
    self.model.database[self.ns['int']] = self.get('int').union(dur)
def add_durable_sets_to_database(self,dur,dur2inv=None):
    """ Add sets/subsets to database."""
    self.model.database[self.ns['dur']] = pd.Index(dur,name=self.ns['n'])
    if dur2inv is None:
        # default mapping: each durable 'x' maps to investment good 'I_x'
        self.model.database[self.ns['dur2inv']] = pd.MultiIndex.from_tuples(list(zip(*[self.get('dur'),'I_'+self.get('dur')])), names = [self.ns['n'],self.model.database.alias_dict[self.ns['n']][0]])
    else:
        self.model.database[self.ns['dur2inv']] = dur2inv
    # investment goods are the image of the dur2inv mapping
    self.model.database[self.ns['inv']] = pd.Index(self.get('dur2inv').get_level_values(1).unique(),name=self.ns['n'])
    # non-durables: everything that is neither durable nor investment good
    self.model.database[self.ns['ndur']]= pd.Index(set(self.get('n'))-set(self.get('dur'))-set(self.get('inv')),name=self.ns['n'])
    self.model.database[self.ns['n']] = self.get('n').union(self.get('inv'))
# --- 2.2: Define default initial values for variables --- #
def initialize_ict(self):
    """Initialize the installation-cost module and absorb its namespace."""
    self.ict.add_symbols(self.model.database,self.ns)
    self.ict.add_conditions(self.model.database,self.ns)
    # ict symbol names become part of the global namespace
    self.ns = {**self.ns,**self.ict.ns}
def default_var_series(self,var):
    """Default initial value (pd.Series) for variable `var` on its time-extended
    domain. Variables defined by the ict module are delegated to it; an unknown
    `var` falls through and returns None implicitly."""
    if var=='PbT':
        return pd.Series(1, index = DataBase_wheels.prepend_index_with_1dindex(self.get('out'),self.get('txE')), name = self.n(var))
    elif var == 'PwT':
        return pd.Series(1, index = DataBase_wheels.prepend_index_with_1dindex(self.get('PwT_dom'),self.get('txE')), name = self.n(var))
    elif var == 'qS':
        return pd.Series(1, index = DataBase_wheels.prepend_index_with_1dindex(self.get('out'),self.get('txE')), name = self.n(var))
    elif var == 'qD':
        # durables also carry a value in the terminal period tE
        durables_tE = DataBase_wheels.prepend_index_with_1dindex(self.get('wT')[self.get('wT').get_level_values(self.ns['n']).isin(self.get('dur'))],self.get('tE'))
        return pd.Series(1, index = DataBase_wheels.prepend_index_with_1dindex(self.get('wT'),self.get('txE')).union(durables_tE), name = self.n(var))
    elif var == 'mu':
        # technology shares are time-invariant
        return pd.Series(1, index = self.get('map_all'), name=self.n(var))
    elif var == 'sigma':
        return pd.Series(0.5, index = self.get('kno_inp'), name = self.n(var))
    elif var == 'eta':
        return pd.Series(1, index = self.get('kno_out'), name = self.n(var))
    elif var == 'Peq':
        return pd.Series(1, index = DataBase_wheels.prepend_index_with_1dindex(self.get('n_out'),self.get('txE')), name = self.n(var))
    elif var == 'markup':
        return pd.Series(0, index = self.get('out'), name = self.n(var))
    elif var == 'tauS':
        return pd.Series(0, index = DataBase_wheels.prepend_index_with_1dindex(self.get('out'),self.get('txE')), name = self.n(var))
    elif var == 'tauLump':
        index = self.get('txE') if self.sector is False else DataBase_wheels.prepend_index_with_1dindex(self.get('s_prod'),self.get('txE'))
        return pd.Series(0, index = index, name=self.n(var))
    elif var=='Rrate':
        # long-run interest rate R_LR repeated over all non-terminal periods
        return pd.Series(self.get('R_LR'), index = self.get('txE'), name = self.ns['Rrate'])
    elif var=='rDepr':
        # default 5% depreciation per (t, durable); sector level added if present
        return pd.Series(0.05, index = self.add_sector_set_from_product([self.get('t'),self.get('dur')],1),name=self.ns['rDepr'])
    elif var in self.ict.ns:
        return self.ict.default_var_series(var)
def ss_rDepr(self,GE_data,inplace=True):
    """Back out steady-state depreciation rates from GE data.

    Uses the steady-state relation I/K = g_LR + rDepr: investment demand is
    mapped onto its durable via dur2inv, divided by the durable stock and the
    long-run growth rate g_LR is subtracted. Repeated over all periods t.
    """
    inv = DataBase_wheels.mi.v1_series(GE_data[self.n('qD')].rctree_pd(self.g('inv')), pd.MultiIndex.from_tuples(self.get('dur2inv').swaplevel(0,1).values,names=[self.n('n'),self.n('nn')]))
    rDepr = pd.Series(DataBase_wheels.repeat_variable_windex(inv/GE_data[self.n('qD')].rctree_pd(self.g('dur'))-self.get('g_LR'),self.get('t')),name=self.n('rDepr'))
    if inplace is True:
        self.model.database[self.n('rDepr')] = rDepr
    else:
        return rDepr
# --- 3: Define groups --- #
def group_conditions(self,group):
    """Return the variable/condition specification for a named group.

    Unknown group names are delegated to the installation-cost module.
    """
    if group == 'g_tech_exo':
        return [{'sigma': self.g('kno_inp'), 'eta': self.g('kno_out'), 'mu': self.g('exo_mu')}]
    elif group == 'g_tech_exo_dyn':
        # dynamic technology parameters: depreciation on durables, scalar Rrate
        return [{'rDepr': self.g('dur'),'Rrate': None}]
    elif group == 'g_tech_endo':
        return [{'mu': {'and': [self.g('map_all'), {'not': self.g('exo_mu')}]},'markup': self.g('out')}]
    elif group == 'gvars_endo':
        # qD is endogenous on (wT, tx0) plus non-durable intermediates at t0
        return [{'PbT': self.g('endo_PbT'), 'PwT': self.g('int'), 'qD': {'or': [{'and': [self.g('wT'), self.g('tx0')]}, {'and': [self.g('int'), self.g('t0'), {'not': self.g('dur')}]}]},'Peq': {'and': [self.g('n_out'),self.g('tx0E')]}}]
    elif group == 'gvars_exo':
        # durable stocks are predetermined at t0, hence exogenous there
        return [{'qS': self.g('out'), 'PwT': self.g('inp'), 'qD': {'and': [self.g('dur'), self.g('t0')]}, 'tauS': self.g('out'), 'tauLump': None if self.sector is False else self.g('s_prod')}]
    elif group == 'g_calib_exo':
        # calibration targets: initial-period quantities/prices
        return [{'qD': {'and': [self.g('inp'), self.g('t0')]}, 'PbT': {'and': [self.g('t0'),self.g('out'),{'not': self.g('endo_PbT')}]}, 'Peq': {'and': [self.g('t0'), self.g('n_out')]}}]
    elif group == 'g_tech':
        return ['g_tech_exo','g_tech_exo_dyn','g_tech_endo']
    elif group == 'g_vars_exo':
        return ['gvars_exo']
    elif group == 'g_vars_endo':
        return ['gvars_endo','g_calib_exo']
    else:
        return self.ict.group_conditions(group)
@property
def exo_groups(self):
    """ Collect exogenous groups """
    # group names are prefixed with the model name to keep them unique
    n = self.model.settings.name+'_'
    if self.state=='B':
        return {n+g: self.add_group(g,n=n) for g in ['g_tech','g_vars_exo']+self.ict.exo_groups()}
    elif self.state in ('SC','DC'):
        # NOTE(review): the ict's *endo* groups are added to the exogenous
        # collection in calibration states — presumably they are fixed during
        # calibration, but confirm this is not a copy-paste slip.
        return {n+g: self.add_group(g,n=n) for g in ['g_tech_exo','g_tech_exo_dyn','gvars_exo','g_calib_exo']+self.ict.endo_groups()}
@property
def endo_groups(self):
    """ Collect endogenous groups """
    n = self.model.settings.name+'_'
    if self.state=='B':
        return {n+g: self.add_group(g,n=n) for g in ['g_vars_endo']+self.ict.endo_groups()}
    elif self.state in ('SC','DC'):
        return {n+g: self.add_group(g,n=n) for g in ['g_tech_endo','gvars_endo']+self.ict.endo_groups()}
@property
def sub_groups(self):
""" Collect groups that are | |
: iterable object of Feature, optional
The features to create the :class:`Annotation` from. if not
provided, an empty :class:`Annotation` is created.
Examples
--------
Creating an annotation from a feature list:
>>> feature1 = Feature("CDS", [Location(-10, 30 )], qual={"gene" : "test1"})
>>> feature2 = Feature("CDS", [Location(20, 50 )], qual={"gene" : "test2"})
>>> annotation = Annotation([feature1, feature2])
>>> for f in sorted(list(annotation)):
... print(f.qual["gene"], "".join([str(loc) for loc in f.locs]))
test1 -10-30 >
test2 20-50 >
Merging two annotations and a feature:
>>> feature3 = Feature("CDS", [Location(100, 130 )], qual={"gene" : "test3"})
>>> feature4 = Feature("CDS", [Location(150, 250 )], qual={"gene" : "test4"})
>>> annotation2 = Annotation([feature3, feature4])
>>> feature5 = Feature("CDS", [Location(-50, 200 )], qual={"gene" : "test5"})
>>> annotation = annotation + annotation2 + feature5
>>> for f in sorted(list(annotation)):
... print(f.qual["gene"], "".join([str(loc) for loc in f.locs]))
test5 -50-200 >
test1 -10-30 >
test2 20-50 >
test3 100-130 >
test4 150-250 >
Location based indexing, note the defects:
>>> annotation = annotation[40:150]
>>> for f in sorted(list(annotation)):
... gene = f.qual["gene"]
... loc_str = "".join([f"{loc} {loc.defect}" for loc in f.locs])
... print(gene, loc_str)
test5 40-149 > Defect.MISS_RIGHT|MISS_LEFT
test2 40-50 > Defect.MISS_LEFT
test3 100-130 > Defect.NONE
"""
def __init__(self, features=None):
    """Create the annotation, optionally seeded with an iterable of features."""
    self._features = set() if features is None else set(features)
def __repr__(self):
    """Represent Annotation as a string for debugging."""
    inner = ", ".join(repr(feat) for feat in self._features)
    return f'Annotation([{inner}])'
def __copy_create__(self):
    # Copy hook: the constructor copies the feature iterable into a fresh
    # set, so the returned Annotation is independent of this one.
    return Annotation(self._features)
def get_features(self):
    """
    Get a copy of the internal feature set.

    Returns
    -------
    feature_list : list of Feature
        A copy of the internal feature set.
    """
    # set.copy() produces the same shallow copy as copy.copy() on a set
    return self._features.copy()
def add_feature(self, feature):
    """
    Add a feature to the annotation.

    Parameters
    ----------
    feature : Feature
        Feature to be added.
    """
    if isinstance(feature, Feature):
        self._features.add(feature)
    else:
        raise TypeError(
            f"Only 'Feature' objects are supported, "
            f"not {type(feature).__name__}"
        )
def get_location_range(self):
"""
Get the range of feature locations,
i.e. the first and exclusive last base/residue.
Returns
-------
int : start
Start location.
int : stop
Exclusive stop location.
"""
first = sys.maxsize
last = -sys.maxsize
for feature in self._features:
for loc in feature.locs:
if loc.first < first:
first = loc.first
if loc.last > last:
last = loc.last
# Exclusive stop -> +1
return first, last+1
def del_feature(self, feature):
    """
    Delete a feature from the annotation.

    Parameters
    ----------
    feature : Feature
        Feature to be removed.

    Raises
    ------
    KeyError
        If the feature is not in the annotation
    """
    # set.remove raises KeyError for a missing element, as documented
    self._features.remove(feature)
def __add__(self, item):
    """Merge with another Annotation or a single Feature into a new Annotation."""
    if isinstance(item, Annotation):
        return Annotation(self._features | item._features)
    if isinstance(item, Feature):
        return Annotation(self._features | {item})
    raise TypeError(
        f"Only 'Feature' and 'Annotation' objects are supported, "
        f"not {type(item).__name__}"
    )
def __iadd__(self, item):
    """In-place merge of another Annotation or a single Feature."""
    if isinstance(item, Annotation):
        self._features |= item._features
        return self
    if isinstance(item, Feature):
        self._features.add(item)
        return self
    raise TypeError(
        f"Only 'Feature' and 'Annotation' objects are supported, "
        f"not {type(item).__name__}"
    )
def __getitem__(self, index):
    """Slice the annotation to a location range.

    Features overlapping the range are kept; their locations are clipped to
    the range and marked with MISS_LEFT/MISS_RIGHT defects where truncated.
    """
    if isinstance(index, slice):
        i_first = index.start
        # If no start or stop index is given, include all
        if i_first is None:
            i_first = -sys.maxsize
        # BUG FIX: the original computed `index.stop - 1` before checking for
        # None, so open-ended slices (e.g. annotation[40:]) raised TypeError.
        i_last = index.stop
        if i_last is None:
            i_last = sys.maxsize
        else:
            i_last = i_last - 1
        sub_annot = Annotation()
        for feature in self:
            locs_in_scope = []
            for loc in feature.locs:
                # Always true for maxsize values
                # in case no start or stop index is given
                if loc.first <= i_last and loc.last >= i_first:
                    # The location is at least partly in the
                    # given location range
                    # Handle defects
                    first = loc.first
                    last = loc.last
                    defect = loc.defect
                    if loc.first < i_first:
                        defect |= Location.Defect.MISS_LEFT
                        first = i_first
                    if loc.last > i_last:
                        defect |= Location.Defect.MISS_RIGHT
                        last = i_last
                    locs_in_scope.append(Location(
                        first, last, loc.strand, defect
                    ))
            if len(locs_in_scope) > 0:
                # The feature is present in the new annotation
                # if any of the original locations is in the new
                # scope
                new_feature = Feature(
                    key=feature.key, locs=locs_in_scope, qual=feature.qual
                )
                sub_annot.add_feature(new_feature)
        return sub_annot
    else:
        raise TypeError(
            f"'{type(index).__name__}' instances are invalid indices"
        )
def __delitem__(self, item):
    """`del annotation[feature]` — remove the feature from the annotation."""
    if isinstance(item, Feature):
        self.del_feature(item)
        return
    raise TypeError(
        f"Only 'Feature' objects are supported, "
        f"not {type(item).__name__}"
    )
def __iter__(self):
    # Iterate over the features (arbitrary set order).
    return self._features.__iter__()
def __contains__(self, item):
    # Membership test against the internal feature set.
    return item in self._features
def __eq__(self, item):
    """Two annotations are equal iff their feature sets are equal."""
    # `and` yields False directly for non-Annotation operands,
    # matching the original early return.
    return isinstance(item, Annotation) and self._features == item._features
def __len__(self):
    # Number of features in the annotation.
    return len(self._features)
class AnnotatedSequence(Copyable):
"""
An :class:`AnnotatedSequence` is a combination of a
:class:`Sequence` and an :class:`Annotation`.
Indexing an :class:`AnnotatedSequence` with a slice returns another
:class:`AnnotatedSequence` with the corresponding subannotation and
a sequence start corrected subsequence, i.e. indexing starts at 1
with the default sequence start 1.
The sequence start in the newly created :class:`AnnotatedSequence`
is the start of the slice.
Furthermore, integer indices are allowed in which case the
corresponding symbol of the sequence is returned (also sequence
start corrected).
In both cases the index must be in range of the sequence, e.g. if
sequence start is 1, index 0 is not allowed.
Negative indices do not mean indexing from the end of the sequence,
in contrast to the behavior in :class:`Sequence` objects.
Both index types can also be used to modify the sequence.
Another option is indexing with a :class:`Feature` (preferably from the
:class:`Annotation` in the same :class:`AnnotatedSequence`).
In this case a sequence, described by the location(s) of the
:class:`Feature`, is returned.
When using a :class:`Feature` for setting an
:class:`AnnotatedSequence` with a sequence, the new sequence is
replacing the locations of the
:class:`Feature`.
Note that the replacing sequence must have the same length as the
sequence of the :class:`Feature` index.
Parameters
----------
sequence : Sequence
The sequence.
Usually a :class:`NucleotideSequence` or
:class:`ProteinSequence`.
annotation : Annotation
The annotation corresponding to `sequence`.
sequence_start : int, optional
By default, the first symbol of the sequence is corresponding
to location 1 of the features in the annotation. The location
of the first symbol can be changed by setting this parameter.
Negative values are not supported yet.
Attributes
----------
sequence : Sequence
The represented sequence.
annotation : Annotation
The annotation corresponding to `sequence`.
sequence_start : int
The location of the first symbol in the sequence.
See also
--------
Annotation, Sequence
Examples
--------
Creating an annotated sequence
>>> sequence = NucleotideSequence("ATGGCGTACGATTAGAAAAAAA")
>>> feature1 = Feature("misc_feature", [Location(1,2), Location(11,12)],
... {"note" : "walker"})
>>> feature2 = Feature("misc_feature", [Location(16,22)], {"note" : "poly-A"})
>>> annotation = Annotation([feature1, feature2])
>>> annot_seq = AnnotatedSequence(annotation, sequence)
>>> print(annot_seq.sequence)
ATGGCGTACGATTAGAAAAAAA
>>> for f in sorted(list(annot_seq.annotation)):
... print(f.qual["note"])
walker
poly-A
Indexing with integers, note the sequence start correction
>>> print(annot_seq[2])
T
>>> print(annot_seq.sequence[2])
G
indexing with slices
>>> annot_seq2 = annot_seq[:16]
>>> print(annot_seq2.sequence)
ATGGCGTACGATTAG
>>> for f in annot_seq2.annotation:
... print(f.qual["note"])
walker
Indexing with features
>>> print(annot_seq[feature1])
ATAT
>>> print(annot_seq[feature2])
AAAAAAA
>>> print(annot_seq.sequence)
ATGGCGTACGATTAGAAAAAAA
>>> annot_seq[feature1] = NucleotideSequence("CCCC")
>>> print(annot_seq.sequence)
CCGGCGTACGCCTAGAAAAAAA
"""
def __init__(self, annotation, sequence, sequence_start=1):
    # Store the parts directly; sequence_start shifts all location-based
    # indexing so that the first symbol corresponds to that location.
    self._annotation = annotation
    self._sequence = sequence
    self._seqstart = sequence_start
def __repr__(self):
    """Represent AnnotatedSequence as a string for debugging."""
    return (f'AnnotatedSequence({self._annotation!r}, {self._sequence!r}, '
            f'sequence_start={self._seqstart})')
@property
def sequence_start(self):
    # Location of the first symbol in the sequence (1 by default).
    return self._seqstart
@property
def sequence(self):
    # The represented sequence.
    return self._sequence
@property
def annotation(self):
    # The annotation corresponding to the sequence.
    return self._annotation
def __copy_create__(self):
    # Copy hook: copy both parts so the new object is independent.
    # BUG FIX: the original passed the bound method `self._sequence.copy`
    # (missing call parentheses), storing a method object instead of a
    # copied sequence.
    return AnnotatedSequence(
        self._annotation.copy(), self._sequence.copy(), self._seqstart)
def reverse_complement(self, sequence_start=1):
"""
Create the reverse complement of the annotated sequence.
This method accurately converts the position and the strand of
the annotation.
The information on the sequence start is lost.
Parameters
----------
sequence_start : int, optional
The location of the first symbol in the reverse complement
sequence.
Returns
-------
The reverse complement of the annotated sequence.
"""
rev_seqstart = sequence_start
rev_sequence = self._sequence.reverse().complement()
seq_len = len(self._sequence)
rev_features = []
for feature in self._annotation:
rev_locs = []
for loc in feature.locs:
# Transform location to the reverse complement strand
# (seq_len-1) -> last sequence index
# (loc.last-self._seqstart) -> location to index
# ... + rev_seqstart -> index to location
rev_loc_first \
= (seq_len-1) - (loc.last-self._seqstart) + rev_seqstart
rev_loc_last \
= (seq_len-1) - (loc.first-self._seqstart) + rev_seqstart
if loc.strand == Location.Strand.FORWARD:
rev_loc_strand = Location.Strand.REVERSE
else:
rev_loc_strand = Location.Strand.FORWARD
rev_loc_defect = Location.Defect.NONE
if loc.defect & Location.Defect.MISS_LEFT:
rev_loc_defect |= Location.Defect.MISS_RIGHT
if loc.defect & Location.Defect.MISS_RIGHT:
rev_loc_defect |= Location.Defect.MISS_LEFT
if loc.defect & Location.Defect.BEYOND_RIGHT:
rev_loc_defect |= Location.Defect.BEYOND_LEFT
if loc.defect & Location.Defect.BEYOND_LEFT:
rev_loc_defect | |
# Source: maxmouchet/rtt — localutils/pathtools.py
"""
pathtools.py provides functions handling IP hops, IXP detection and ASN information.
"""
from . import dbtools as db
import os
import copy
import logging
# load database from the local folder
cur_path = os.path.abspath(os.path.dirname(__file__))
# AS-to-AS relationship snapshot (as-rel2 format, dated 2016-12-01)
as_rel = db.AsRelationDB(os.path.join(cur_path, "db/20161201.as-rel2.txt"))
# IP-to-ASN mapping, plus a separate list of reserved IP blocks
ip2asn = db.AsnDB(main=os.path.join(cur_path, "db/ipasn.dat"),
                  reserved=os.path.join(cur_path, "db/reserved_ip.txt"))
# IXP prefix and membership databases
ixp_pref = db.IxpPrefixDB(os.path.join(cur_path, "db/ixp_prefixes.txt"))
ixp_member = db.IxpMemberDB(os.path.join(cur_path, "db/ixp_membership.txt"))
def get_ip_info(ip):
    """Query the ASN and IXP information for a given IP address from various data source

    Args:
        ip (string): ip address, e.g. '172.16.17.32'

    Returns:
        addr (db.Addr): Addr object, with addr_type attribute set
    """
    # 1) IXP interconnection addresses take precedence
    interco = ixp_member.lookup_interco(ip)
    if interco is not None:
        return interco
    # 2) then membership of a known IXP prefix
    ixp = ixp_pref.lookup(ip)
    if ixp is not None:
        return db.Addr(addr=ip, addr_type=db.AddrType.IxpPref, ixp=ixp)
    # 3) finally the ip2asn database
    asn = ip2asn.lookup(ip)
    if type(asn) is int:  # an int result is an ASN
        return db.Addr(addr=ip, addr_type=db.AddrType.Normal, asn=asn)
    # otherwise: a string for reserved IP blocks, or None when not found
    return db.Addr(addr=ip, addr_type=db.AddrType.Others, desc=asn)
def bridge(path):
    """Bridge ASN-less gaps in a path of IP hops.

    For each maximal run of hops without an ASN that contains at least one
    dbtools.AddrType.Others hop and is strictly inside the path, drop the
    Others hops (keeping IXP hops) when the two ASes surrounding the run are
    identical or have a known relationship.
    Args:
        path (list of dbtools.Addr): a path composed of IP hops
    Return:
        list of dbtools.Addr
    """
    asn_seq = [hop.asn for hop in path]
    last = len(path) - 1
    drop = set()  # indexes of hops to remove
    for begin, stop in find_holes(asn_seq):
        # ignore runs touching either end of the path
        if begin == 0 or stop == last:
            continue
        # only act on runs that actually contain an Others hop
        if db.AddrType.Others not in [hop.type for hop in path[begin:stop + 1]]:
            continue
        asn_before = path[begin - 1].asn
        asn_after = path[stop + 1].asn
        if asn_before == asn_after or as_rel.has_relation((asn_before, asn_after)) is not None:
            for i in range(begin, stop + 1):
                if path[i].type == db.AddrType.Others:
                    drop.add(i)
    return [hop for i, hop in enumerate(path) if i not in drop]
def find_holes(x):
    """Locate maximal runs of consecutive None values in a sequence.
    Args:
        x (iterator): the input sequence
    Returns:
        list of (int, int): inclusive (start, end) index pairs, one per run
    """
    holes = []
    start = None  # index where the current None-run began, or None if not in one
    idx = -1      # survives the loop for the trailing-run check below
    for idx, val in enumerate(x):
        if val is None:
            if start is None:
                start = idx
        elif start is not None:
            holes.append((start, idx - 1))
            start = None
    # the sequence may end while still inside a run
    if start is not None:
        holes.append((start, idx))
    return holes
def insert_ixp(path):
    """insert IXP hops according to the presence of IXP address and IXP membership of surrounding AS
    Args:
        path (list of db.Addr): a list of hops
    Returns:
        list of db.Addr; NOTE: `path` itself is mutated in place and returned
    """
    path_len = len(path)
    ixp_insertion = []  # (index, ixp) pairs; insertions are deferred to the end
    for idx, hop in enumerate(path):
        # only consider IXP-flavoured hops strictly inside the path
        if (hop.type == db.AddrType.InterCo or hop.type == db.AddrType.IxpPref) and (0 < idx < path_len-1):
            # Normal - Interco/IxpPref - Normal
            if path[idx-1].type == db.AddrType.Normal and path[idx+1].type == db.AddrType.Normal:
                left_hop = path[idx-1]
                right_hop = path[idx+1]
                # Normal - Interco - Normal
                if hop.type == db.AddrType.InterCo:
                    # ASN: A - A - A -> A - A - A
                    if left_hop.get_asn() == hop.get_asn() == right_hop.get_asn():
                        pass
                    # ASN: A - A - B -> A - A - IXP - B
                    elif left_hop.get_asn() == hop.get_asn() != right_hop.get_asn():
                        ixp_insertion.append((idx+1, hop.ixp))
                    # ASN: A - B - B -> A - IXP - B - B
                    elif left_hop.get_asn() != hop.get_asn() == right_hop.get_asn():
                        ixp_insertion.append((idx, hop.ixp))
                    # ASN: A - B - C
                    elif left_hop.get_asn() != hop.get_asn() != right_hop.get_asn():
                        # check IXP membership of both neighbouring ASes
                        left_is_member = ixp_member.is_member(ixp=hop.ixp, asn=left_hop.asn)
                        right_is_member = ixp_member.is_member(ixp=hop.ixp, asn=right_hop.asn)
                        # IXP membership: A -m- B -m- C -> A - IXP - B - IXP - C
                        if left_is_member and right_is_member:
                            ixp_insertion.append((idx, hop.ixp))
                            ixp_insertion.append((idx+1, hop.ixp))
                        # IXP membership: A -m- B - C -> A - IXP - B - C
                        elif left_is_member:
                            ixp_insertion.append((idx, hop.ixp))
                        # IXP membership: A - B -m- C -> A - B - IXP - C
                        elif right_is_member:
                            ixp_insertion.append((idx + 1, hop.ixp))
                        else:
                            pass # in this case no IXP hop will be seen in the path
                # Normal - IxpPref - Normal
                elif hop.type == db.AddrType.IxpPref:
                    left_is_member = ixp_member.is_member(ixp=hop.ixp, asn=left_hop.asn)
                    right_is_member = ixp_member.is_member(ixp=hop.ixp, asn=right_hop.asn)
                    # IXP membership: A -m- IxpPref -m- B -> A - IXP - IxpPref - IXP - B
                    if left_is_member and right_is_member:
                        ixp_insertion.append((idx, hop.ixp))
                        ixp_insertion.append((idx + 1, hop.ixp))
                    # IXP membership: A -m- IxpPref- B -> A - IXP - IxpPref - B
                    elif left_is_member:
                        ixp_insertion.append((idx, hop.ixp))
                    # IXP membership: A - IxpPref -m- B -> A - IxpPref- IXP - B
                    elif right_is_member:
                        ixp_insertion.append((idx + 1, hop.ixp))
                    else:
                        pass # in this case no IXP hop shall be seen in the path
            # Interco/IxpPref - Interco/IxpPref
            elif path[idx+1].type == db.AddrType.InterCo or path[idx+1].type == db.AddrType.IxpPref:
                # belong to same IXP: one virtual IXP hop between the pair
                if path[idx].ixp == path[idx+1].ixp:
                    ixp_insertion.append((idx + 1, hop.ixp))
                else:
                    # different IXPs: one virtual hop before each
                    ixp_insertion.append((idx, hop.ixp))
                    ixp_insertion.append((idx+1, path[idx+1].ixp))
    # insertion indexes refer to the original path; each performed insertion
    # shifts all later indexes right by one, hence the running offset
    shift = 0
    for ins in ixp_insertion:
        path.insert(ins[0]+shift, db.Addr(addr=None, addr_type=db.AddrType.Virtual, ixp=ins[1]))
        shift += 1
    return path
def remove_repeated_asn(path):
    """Collapse consecutive duplicate ASNs in a path.
    Args:
        path (list of ASN): ASN can be int, or str for IXP hops
    Returns:
        list of ASN with adjacent repeats removed
    """
    collapsed = []
    for hop in path:
        # keep a hop only when it differs from the previously kept one
        if not collapsed or hop != collapsed[-1]:
            collapsed.append(hop)
    return collapsed
def as_path_change(paths):
    """Flag the indexes at which the AS path differs from its predecessor.
    Args:
        paths (list of list of ASN): [[ASN,...],...]
    Returns:
        list of int, 1 where the path changed, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        if paths[idx] != paths[idx - 1]:
            change[idx] = 1
    return change
def as_path_change_cl(paths):
    """Flag indexes with a certain AS path change, excluding changes that are
    merely due to timeout, private addresses, or reachability issues.
    Args:
        paths (list of list of ASN): [[ASN,...],...]
    Returns:
        list of int, 1 where a genuine change occurred, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue  # empty path on either side: reachability, not a change
        if cur[-1] != prev[-1] or cur == prev:
            continue  # destination changed (reachability issue) or no change
        diff_as = set(cur) ^ set(prev)
        # count it only when every differing hop is a valid (int) ASN
        if diff_as and all(type(i) is int for i in diff_as):
            change[idx] = 1
    return change
def as_path_change_cs(paths):
    """Flag indexes where an AS path change is certain: some aligned position
    holds two DIFFERENT valid public ASN hops (both ints). Positions that
    differ because of timeout/private-address markers are skipped over.
    Args:
        paths (list of list of ASN): [[ASN,...],...]
    Returns:
        list of int, 1 where such a change occurred, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue
        for a, b in zip(cur, prev):
            # stop at the first aligned pair of differing int ASNs
            if a != b and type(a) is int and type(b) is int:
                change[idx] = 1
                break
    return change
def is_ixp_asn_hop(x):
    """Tell whether a db.Addr.get_asn() return value names an IXP.
    A str that is neither 'Invalid IP address' nor a reserved-block
    description must be an IXP name.
    Args:
        x (int, string, None)
    Returns:
        bool
    """
    if type(x) is not str:
        return False
    return not is_bad_hop(x)
def is_bad_hop(x):
    """Check whether the return value of db.Addr.get_asn() is a description
    string for reserved IP blocks or an invalid IP address.
    Args:
        x (int, string, None)
    Returns:
        bool
    """
    # NOTE(review): the `ip2asn.reserved_des is None` clause makes EVERY hop
    # "bad" whenever the reserved-description table failed to load; it looks
    # like a guard intended before the membership test (i.e. `is not None and`)
    # -- confirm the intended semantics before changing.
    return x == 'Invalid IP address' or ip2asn.reserved_des is None or x in ip2asn.reserved_des
def as_path_change_ixp(paths):
"""" mark the idx at which there is surely an AS | |
ABSTRACT PROPERTY abs_prop
RENAME TO new_abs_prop;
""")
# Check we can create a new type that uses it
await self.con.execute("""
CREATE TYPE RenameObj2 {
CREATE PROPERTY prop EXTENDING new_abs_prop -> str;
};
""")
# Check we can create a new prop with the same name
await self.con.execute("""
CREATE ABSTRACT PROPERTY abs_prop {
CREATE ANNOTATION title := "lol";
};
""")
await self.con.execute("""
CREATE MODULE foo;
ALTER ABSTRACT PROPERTY new_abs_prop
RENAME TO foo::new_abs_prop2;
""")
await self.con.execute("""
ALTER TYPE RenameObj DROP PROPERTY prop;
ALTER TYPE RenameObj2 DROP PROPERTY prop;
DROP ABSTRACT PROPERTY foo::new_abs_prop2;
""")
    async def test_edgeql_ddl_rename_annotated_01(self):
        """A property carrying an annotation can be renamed in place."""
        await self.con.execute("""
            CREATE TYPE RenameObj {
                CREATE PROPERTY prop -> str {
                    CREATE ANNOTATION title := "lol";
                }
            };
        """)
        await self.con.execute("""
            ALTER TYPE RenameObj {
                ALTER PROPERTY prop RENAME TO prop2;
            };
        """)
    async def test_edgeql_ddl_delete_abs_link_01(self):
        """Creating and then dropping a trivial abstract link succeeds."""
        # test deleting a trivial abstract link
        await self.con.execute("""
            CREATE ABSTRACT LINK abs_link;
        """)
        await self.con.execute("""
            DROP ABSTRACT LINK abs_link;
        """)
    async def test_edgeql_ddl_alias_01(self):
        """Computed link shapes resolve through an alias and an alias-of-alias."""
        # Issue #1184
        await self.con.execute(r"""
            CREATE TYPE User {
                CREATE REQUIRED PROPERTY name -> str;
            };
            CREATE TYPE Award {
                CREATE LINK user -> User;
            };
            CREATE ALIAS Alias1 := Award {
                user2 := (SELECT .user {name2 := .name ++ '!'})
            };
            CREATE ALIAS Alias2 := Alias1;
            INSERT Award { user := (INSERT User { name := 'Corvo' }) };
        """)
        await self.assert_query_result(
            r'''
                SELECT Alias1 {
                    user2: {
                        name2
                    }
                }
            ''',
            [{
                'user2': {
                    'name2': 'Corvo!',
                },
            }],
        )
        await self.assert_query_result(
            r'''
                SELECT Alias2 {
                    user2: {
                        name2
                    }
                }
            ''',
            [{
                'user2': {
                    'name2': 'Corvo!',
                },
            }],
        )
    async def test_edgeql_ddl_alias_02(self):
        """Computed links to unrelated types work through chained aliases."""
        # Issue #1184
        await self.con.execute(r"""
            CREATE TYPE User {
                CREATE REQUIRED PROPERTY name -> str;
            };
            CREATE TYPE Award {
                CREATE REQUIRED PROPERTY name -> str;
            };
            CREATE ALIAS Alias1 := Award {
                a_user := (SELECT User { name } LIMIT 1)
            };
            CREATE ALIAS Alias2 := Alias1;
            INSERT User { name := 'Corvo' };
            INSERT Award { name := 'Rune' };
        """)
        await self.assert_query_result(
            r'''
                SELECT Alias1 {
                    a_user: {
                        name
                    }
                }
            ''',
            [{
                'a_user': {
                    'name': 'Corvo',
                },
            }],
        )
        await self.assert_query_result(
            r'''
                SELECT Alias2 {
                    a_user: {
                        name
                    }
                }
            ''',
            [{
                'a_user': {
                    'name': 'Corvo',
                },
            }],
        )
    async def test_edgeql_ddl_alias_03(self):
        """An alias can be renamed, moved across modules, and then dropped."""
        await self.con.execute(r"""
            CREATE ALIAS RenameAlias03 := (
                SELECT BaseObject {
                    alias_computable := 'rename alias 03'
                }
            );
            ALTER ALIAS RenameAlias03 {
                RENAME TO NewAlias03;
            };
        """)
        await self.assert_query_result(
            r'''
                SELECT NewAlias03.alias_computable LIMIT 1;
            ''',
            ['rename alias 03']
        )
        await self.con.execute(r"""
            CREATE MODULE foo;
            ALTER ALIAS NewAlias03 {
                RENAME TO foo::NewAlias03;
            };
        """)
        await self.assert_query_result(
            r'''
                SELECT foo::NewAlias03.alias_computable LIMIT 1;
            ''',
            ['rename alias 03']
        )
        await self.con.execute(r"""
            DROP ALIAS foo::NewAlias03;
        """)
    async def test_edgeql_ddl_alias_04(self):
        """Two identically-defined aliases with different names may coexist."""
        await self.con.execute(r"""
            CREATE ALIAS DupAlias04_1 := BaseObject {
                foo := 'hello world 04'
            };
            # create an identical alias with a different name
            CREATE ALIAS DupAlias04_2 := BaseObject {
                foo := 'hello world 04'
            };
        """)
        await self.assert_query_result(
            r'''
                SELECT DupAlias04_1.foo LIMIT 1;
            ''',
            ['hello world 04']
        )
        await self.assert_query_result(
            r'''
                SELECT DupAlias04_2.foo LIMIT 1;
            ''',
            ['hello world 04']
        )
    async def test_edgeql_ddl_alias_05(self):
        """An alias built on top of another alias sees its computed fields."""
        await self.con.execute(r"""
            CREATE TYPE BaseType05 {
                CREATE PROPERTY name -> str;
            };
            CREATE ALIAS BT05Alias1 := BaseType05 {
                a := .name ++ '_more'
            };
            # alias of an alias
            CREATE ALIAS BT05Alias2 := BT05Alias1 {
                b := .a ++ '_stuff'
            };
            INSERT BaseType05 {name := 'bt05'};
        """)
        await self.assert_query_result(
            r'''
                SELECT BT05Alias1 {name, a};
            ''',
            [{
                'name': 'bt05',
                'a': 'bt05_more',
            }]
        )
        await self.assert_query_result(
            r'''
                SELECT BT05Alias2 {name, a, b};
            ''',
            [{
                'name': 'bt05',
                'a': 'bt05_more',
                'b': 'bt05_more_stuff',
            }]
        )
    async def test_edgeql_ddl_alias_06(self):
        """An alias may be referenced as a computed link target inside another alias."""
        # Issue #1184
        await self.con.execute(r"""
            CREATE TYPE BaseType06 {
                CREATE PROPERTY name -> str;
            };
            INSERT BaseType06 {
                name := 'bt06',
            };
            INSERT BaseType06 {
                name := 'bt06_1',
            };
            CREATE ALIAS BT06Alias1 := BaseType06 {
                a := .name ++ '_a'
            };
            CREATE ALIAS BT06Alias2 := BT06Alias1 {
                b := .a ++ '_b'
            };
            CREATE ALIAS BT06Alias3 := BaseType06 {
                b := BT06Alias1
            };
        """)
        await self.assert_query_result(
            r'''
                SELECT BT06Alias1 {name, a} FILTER .name = 'bt06';
            ''',
            [{
                'name': 'bt06',
                'a': 'bt06_a',
            }],
        )
        await self.assert_query_result(
            r'''
                SELECT BT06Alias2 {name, a, b} FILTER .name = 'bt06';
            ''',
            [{
                'name': 'bt06',
                'a': 'bt06_a',
                'b': 'bt06_a_b',
            }],
        )
        await self.assert_query_result(
            r'''
                SELECT BT06Alias3 {
                    name,
                    b: {name, a} ORDER BY .name
                }
                FILTER .name = 'bt06';
            ''',
            [{
                'name': 'bt06',
                'b': [{
                    'name': 'bt06',
                    'a': 'bt06_a',
                }, {
                    'name': 'bt06_1',
                    'a': 'bt06_1_a',
                }],
            }],
        )
    async def test_edgeql_ddl_alias_07(self):
        """An alias whose definition references itself is rejected."""
        # Issue #1187
        with self.assertRaisesRegex(
                edgedb.SchemaDefinitionError,
                "illegal self-reference in definition of "
                "'default::IllegalAlias07'"):
            await self.con.execute(r"""
                CREATE ALIAS IllegalAlias07 := Object {a := IllegalAlias07};
            """)
    async def test_edgeql_ddl_alias_08(self):
        """A dropped alias name can be re-created with a new definition."""
        # Issue #1184
        await self.con.execute(r"""
            CREATE TYPE BaseType08 {
                CREATE PROPERTY name -> str;
            };
            INSERT BaseType08 {
                name := 'bt08',
            };
            CREATE ALIAS BT08Alias1 := BaseType08 {
                a := .name ++ '_a'
            };
            CREATE ALIAS BT08Alias2 := BT08Alias1 {
                b := .a ++ '_b'
            };
            # drop the freshly created alias
            DROP ALIAS BT08Alias2;
            # re-create the alias that was just dropped
            CREATE ALIAS BT08Alias2 := BT08Alias1 {
                b := .a ++ '_bb'
            };
        """)
        await self.assert_query_result(
            r'''
                SELECT BT08Alias1 {name, a} FILTER .name = 'bt08';
            ''',
            [{
                'name': 'bt08',
                'a': 'bt08_a',
            }],
        )
        await self.assert_query_result(
            r'''
                SELECT BT08Alias2 {name, a, b} FILTER .name = 'bt08';
            ''',
            [{
                'name': 'bt08',
                'a': 'bt08_a',
                'b': 'bt08_a_bb',
            }],
        )
    async def test_edgeql_ddl_alias_09(self):
        """An expression alias cannot be used as a link target."""
        await self.con.execute(r"""
            CREATE ALIAS CreateAlias09 := (
                SELECT BaseObject {
                    alias_computable := 'rename alias 03'
                }
            );
        """)
        async with self.assertRaisesRegexTx(
            edgedb.InvalidLinkTargetError,
            "invalid link type: 'default::CreateAlias09' is an"
            " expression alias, not a proper object type",
        ):
            await self.con.execute(r"""
                CREATE TYPE AliasType09 {
                    CREATE OPTIONAL SINGLE LINK a -> CreateAlias09;
                }
            """)
    async def test_edgeql_ddl_inheritance_alter_01(self):
        """Dropping a property on the base type also detaches it from children."""
        await self.con.execute(r"""
            CREATE TYPE InhTest01 {
                CREATE PROPERTY testp -> int64;
            };
            CREATE TYPE InhTest01_child EXTENDING InhTest01;
        """)
        await self.con.execute("""
            ALTER TYPE InhTest01 {
                DROP PROPERTY testp;
            }
        """)
    async def test_edgeql_ddl_inheritance_alter_02(self):
        """Dropping an inherited property directly on the child is an error."""
        await self.con.execute(r"""
            CREATE TYPE InhTest01 {
                CREATE PROPERTY testp -> int64;
            };
            CREATE TYPE InhTest01_child EXTENDING InhTest01;
        """)
        with self.assertRaisesRegex(
                edgedb.SchemaError,
                "cannot drop inherited property 'testp'"):
            await self.con.execute("""
                ALTER TYPE InhTest01_child {
                    DROP PROPERTY testp;
                }
            """)
    async def test_edgeql_ddl_inheritance_alter_03(self):
        """Same-named links on unrelated types stay independent in backlinks."""
        await self.con.execute(r"""
            CREATE TYPE Owner;
            CREATE TYPE Stuff1 {
                # same link name, but NOT related via explicit inheritance
                CREATE LINK owner -> Owner
            };
            CREATE TYPE Stuff2 {
                # same link name, but NOT related via explicit inheritance
                CREATE LINK owner -> Owner
            };
        """)
        await self.assert_query_result("""
            SELECT Owner.<owner;
        """, [])
    async def test_edgeql_ddl_inheritance_alter_04(self):
        """Setting a default on an inherited property applies to the child."""
        await self.con.execute(r"""
            CREATE TYPE InhTest04 {
                CREATE PROPERTY testp -> int64;
            };
            CREATE TYPE InhTest04_child EXTENDING InhTest04;
        """)
        await self.con.execute(r"""
            ALTER TYPE InhTest04_child {
                ALTER PROPERTY testp {
                    SET default := 42;
                };
            };
        """)
        await self.assert_query_result(
            r"""
                SELECT schema::ObjectType {
                    properties: {
                        name,
                        default,
                    }
                    FILTER .name = 'testp',
                }
                FILTER .name = 'default::InhTest04_child';
            """,
            [{
                'properties': [{
                    'name': 'testp',
                    'default': '42',
                }],
            }],
        )
    async def test_edgeql_ddl_constraint_01(self):
        """Inherited constraints get correct names and lose the DELEGATED flag."""
        # Test that the inherited constraint doesn't end up with some
        # bad name like 'default::std::exclusive'.
        await self.con.execute(r"""
            CREATE ABSTRACT TYPE BaseTypeCon01;
            CREATE TYPE TypeCon01 EXTENDING BaseTypeCon01;
            ALTER TYPE BaseTypeCon01
                CREATE SINGLE PROPERTY name -> std::str;
            # make sure that we can create a constraint in the base
            # type now
            ALTER TYPE BaseTypeCon01
                ALTER PROPERTY name
                    CREATE DELEGATED CONSTRAINT exclusive;
        """)
        await self.assert_query_result("""
            WITH MODULE schema
            SELECT ObjectType {
                name,
                properties: {
                    name,
                    constraints: {
                        name,
                        delegated,
                    }
                } FILTER .name = 'name'
            }
            FILTER .name LIKE 'default::%TypeCon01'
            ORDER BY .name;
        """, [
            {
                'name': 'default::BaseTypeCon01',
                'properties': [{
                    'name': 'name',
                    'constraints': [{
                        'name': 'std::exclusive',
                        'delegated': True,
                    }],
                }]
            },
            {
                'name': 'default::TypeCon01',
                'properties': [{
                    'name': 'name',
                    'constraints': [{
                        'name': 'std::exclusive',
                        'delegated': False,
                    }],
                }]
            }
        ])
async def test_edgeql_ddl_constraint_02(self):
# Regression test for #1441.
with self.assertRaisesRegex(
edgedb.InvalidConstraintDefinitionError,
"must define parameters"
):
async with self._run_and_rollback():
await self.con.execute('''
CREATE ABSTRACT CONSTRAINT aaa EXTENDING max_len_value;
| |
kills the scheduler
"""
  def __init__ (self):
    # Nothing to configure; all the work happens in execute().
    pass
  def execute (self, task, scheduler):
    # Shut the scheduler down; the requesting task is not rescheduled.
    scheduler.quit()
class Sleep (BlockingOperation):
  """
  Sleep for specified amount of time (seconds)
  None means unscheduled (i.e., sleep until an outside force wakes it)
  0 means reschedule for later (no additional time)
  """
  def __init__ (self, timeToWake = None, absoluteTime = False):
    """
    timeToWake   seconds to sleep (relative), or a wall-clock time.time()
                 value when absoluteTime is True; None = sleep indefinitely
    absoluteTime whether timeToWake is an absolute timestamp
    """
    if absoluteTime == False and timeToWake != None: timeToWake += time.time()
    self._t = timeToWake
  def execute (self, task, scheduler):
    if self._t is None:
      # Just unschedule
      return
    # NOTE: was "self._t is 0" -- identity comparison with an int literal is a
    # CPython small-int caching accident (and a SyntaxWarning on 3.8+); use ==.
    if self._t == 0 or self._t < time.time():
      # Just reschedule
      scheduler.fast_schedule(task)
      return
    scheduler._selectHub.registerTimer(task, self._t, True) # A bit ugly
class Select (BlockingOperation):
  """
  Should be very similar to Python select.select()
  """
  def __init__ (self, *args, **kw):
    # Arguments are stored verbatim and forwarded to registerSelect()
    self._args = args
    self._kw = kw
  def execute (self, task, scheduler):
    # Hand the select request to the scheduler's SelectHub thread
    scheduler._selectHub.registerSelect(task, *self._args, **self._kw)
# Default flags for Recv: non-blocking receive where the platform supports it.
defaultRecvFlags = 0
try:
  defaultRecvFlags = socket.MSG_DONTWAIT
except AttributeError:
  # MSG_DONTWAIT is not available on all platforms (e.g. Windows); a bare
  # except here would also have hidden genuine errors.
  pass
class Recv (BlockingOperation):
  """
  Blocking receive on a file descriptor: selects on fd (read + error sets)
  and then performs a single recv(), returning the data (or None on error).
  """
  def __init__ (self, fd, bufsize = 1024*8, flags = defaultRecvFlags,
                timeout = None):
    """
    Recv call on fd.
    """
    self._fd = fd
    self._length = bufsize
    self._timeout = timeout
    self._flags = flags
  def _recvReturnFunc (self, task):
    # Select() will have placed file descriptors in rv
    if len(task.rv[2]) != 0 or len(task.rv[0]) == 0:
      # Socket error (fd in error set, or nothing became readable)
      task.rv = None
      return None
    sock = task.rv[0][0]
    task.rv = None
    try:
      return sock.recv(self._length, self._flags)
    except:
      # Best effort: report and deliver None rather than killing the task
      traceback.print_exc()
      return None #
  def execute (self, task, scheduler):
    # rf is picked up by the scheduler to post-process the select result
    task.rf = self._recvReturnFunc
    scheduler._selectHub.registerSelect(task, [self._fd], None, [self._fd],
                                        timeout=self._timeout)
class Send (BlockingOperation):
  """
  Blocking send on a file descriptor: selects on fd for writability and
  sends the data in chunks of up to 1024 bytes, rescheduling itself until
  everything is sent or a socket error occurs. The task eventually receives
  the total number of bytes sent.
  """
  def __init__ (self, fd, data):
    self._fd = fd
    self._data = data
    self._sent = 0
    self._scheduler = None
  def _sendReturnFunc (self, task):
    # Select() will have placed ([readable], [writable], [errored]) in rv
    if len(task.rv[2]) != 0 or len(task.rv[1]) == 0:
      # Socket error -- give up and report what we managed to send
      task.rv = None
      return self._sent
    # BUG FIX: was "sock = task.rv[1]", which grabbed the writable *list*
    # rather than the socket (compare Recv, which uses task.rv[0][0]).
    sock = task.rv[1][0]
    task.rv = None
    try:
      # BUG FIX: the original only assigned "data" when more than 1024 bytes
      # remained, so smaller payloads raised a NameError that the except
      # below silently swallowed -- causing an endless reschedule loop.
      data = self._data[:1024]
      self._data = self._data[1024:]
      l = sock.send(data, flags = socket.MSG_DONTWAIT)
      self._sent += l
      if l == len(data) and len(self._data) == 0:
        return self._sent
      # Partial send: put the unsent tail back in front of the remainder
      self._data = data[l:] + self._data
    except:
      pass
    # Still have data to send...
    self.execute(task, self._scheduler)
    return ABORT
  def execute (self, task, scheduler):
    self._scheduler = scheduler
    task.rf = self._sendReturnFunc
    scheduler._selectHub.registerSelect(task, None, [self._fd], [self._fd])
#TODO: just merge this in with Scheduler?
class SelectHub (object):
  """
  This class is a single select() loop that handles all Select() requests for
  a scheduler as well as timed wakes (i.e., Sleep()).

  NOTE: this is Python 2 code (itervalues/iteritems, list + dict.keys()).
  """
  def __init__ (self, scheduler, useEpoll=False):
    # We store tuples of (elapse-time, task)
    self._sleepers = [] # Sleeping items stored as a heap
    self._incoming = Queue() # Threadsafe queue for new items
    self._scheduler = scheduler
    self._pinger = pox.lib.util.makePinger()
    self.epoll = EpollSelect() if useEpoll else None
    self._ready = False
    # Daemon worker thread runs the select loop until the scheduler quits
    self._thread = Thread(target = self._threadProc)
    self._thread.daemon = True
    self._thread.start()
    # Ugly busy wait for initialization
    #while self._ready == False:
  def _threadProc (self):
    # Worker loop: multiplex all registered Select()s plus per-task timeouts.
    tasks = {}     # task -> (task, rlist, wlist, xlist, absolute-timeout)
    timeouts = []  # NOTE(review): never populated anywhere -- the len() check
                   # below always takes the first branch; looks like dead code
    rets = {}      # task -> ([readable], [writable], [errored]) to deliver
    while self._scheduler._hasQuit == False:
      #print("SelectHub cycle")
      if len(timeouts) == 0:
        timeout = None
      else:
        timeout = self._sleepers[0][0] - time.time()
        if timeout < 0: timeout = 0
      #NOTE: Everything you select on eventually boils down to file descriptors,
      #      which are unique, obviously.  It might be possible to leverage this
      #      to reduce hashing cost (i.e. by picking a really good hashing
      #      function), though this is complicated by wrappers, etc...
      rl = {}  # fd -> task, for each task's read set
      wl = {}  # fd -> task, for each task's write set
      xl = {}  # fd -> task, for each task's error set
      # NOTE(review): this unconditionally clobbers the timeout computed above
      timeout = None
      timeoutTask = None
      now = time.time()
      expired = None
      for t,trl,twl,txl,tto in tasks.itervalues():
        if tto != None:
          if tto <= now:
            # Already expired
            if expired is None: expired = []
            expired.append(t)
            if tto-now > 0.1: print("preexpired",tto,now,tto-now)
            continue
          tt = tto - now
          # Track the soonest-expiring task; it determines the select timeout
          if tt < timeout or timeout is None:
            timeout = tt
            timeoutTask = t
        if trl:
          for i in trl: rl[i] = t
        if twl:
          for i in twl: wl[i] = t
        if txl:
          for i in txl: xl[i] = t
      if expired:
        # Wake already-expired tasks with empty fd sets (timeout result)
        for t in expired:
          del tasks[t]
          self._return(t, ([],[],[]))
      if timeout is None: timeout = CYCLE_MAXIMUM
      # The pinger is always in the read set so registerSelect can wake us
      if self.epoll:
        ro, wo, xo = self.epoll.select( rl.keys() + [self._pinger],
                                        wl.keys(),
                                        xl.keys(), timeout )
      else:
        ro, wo, xo = select.select( rl.keys() + [self._pinger],
                                    wl.keys(),
                                    xl.keys(), timeout )
      if len(ro) == 0 and len(wo) == 0 and len(xo) == 0 and timeoutTask != None:
        # IO is idle - dispatch timers / release timeouts
        del tasks[timeoutTask]
        self._return(timeoutTask, ([],[],[]))
      else:
        # We have IO events
        if self._pinger in ro:
          # Woken by registerSelect(): drain the pings and pick up new tasks
          self._pinger.pongAll()
          while not self._incoming.empty():
            stuff = self._incoming.get(True)
            task = stuff[0]
            assert task not in tasks
            tasks[task] = stuff
            self._incoming.task_done()
          if len(ro) == 1 and len(wo) == 0 and len(xo) == 0:
            # Just recycle
            continue
          ro.remove(self._pinger)
        # At least one thread is going to be resumed
        # Group ready fds by their owning task before delivering
        for i in ro:
          task = rl[i]
          if task not in rets: rets[task] = ([],[],[])
          rets[task][0].append(i)
        for i in wo:
          task = wl[i]
          if task not in rets: rets[task] = ([],[],[])
          rets[task][1].append(i)
        for i in xo:
          task = xl[i]
          if task not in rets: rets[task] = ([],[],[])
          rets[task][2].append(i)
        for t,v in rets.iteritems():
          del tasks[t]
          self._return(t, v)
        rets.clear()
  def registerSelect (self, task, rlist = None, wlist = None, xlist = None,
                      timeout = None, timeIsAbsolute = False):
    # Queue a select request for the worker thread, converting a relative
    # timeout to an absolute deadline, then wake the worker.
    if not timeIsAbsolute:
      if timeout != None:
        timeout += time.time()
    self._incoming.put((task, rlist, wlist, xlist, timeout))
    self._cycle()
  def _cycle (self):
    """
    Cycle the wait thread so that new timers or FDs can be picked up
    """
    self._pinger.ping()
  def registerTimer (self, task, timeToWake, timeIsAbsolute = False):
    """
    Register a task to be wakened up interval units in the future.
    It means timeToWake seconds in the future if absoluteTime is False.
    """
    # Implemented as a select on no fds with only a timeout
    return self.registerSelect(task, None, None, None, timeToWake,
                               timeIsAbsolute)
  def _return (self, sleepingTask, returnVal):
    #print("reschedule", sleepingTask)
    # Deliver the select result and hand the task back to the scheduler
    sleepingTask.rv = returnVal
    self._scheduler.fast_schedule(sleepingTask)
class ScheduleTask (BaseTask):
  """
  If multiple real threads (such as a recoco scheduler thread and any
  other thread, or any two other threads) try to schedule ("wake") the
  same Task with Scheduler.fast_schedule(), there is a race condition where
  the Task may get scheduled multiple times, which is probably quite bad.
  Scheduler.schedule() fixes this by creating one of these ScheduleTasks,
  and it's this ScheduleTask that actually calls fast_schedule().  This
  way, the Task is only ever *really* scheduled from the scheduler thread
  and the race condition doesn't exist.
  """
  def __init__ (self, scheduler, task):
    BaseTask.__init__(self)
    self._scheduler = scheduler
    self._task = task  # the task to be safely scheduled
  def run (self):
    #TODO: Refactor the following, since it is copy/pasted from schedule().
    # Runs on the scheduler thread, so this membership check is race-free.
    if self._task in self._scheduler._ready:
      # It might make sense to keep a flag on the task, since checking
      # if it's in the ready list is not very efficient.
      # Not sure if it makes sense to print out a message here or not.
      import logging
      logging.getLogger("recoco").info("Task %s scheduled multiple " +
                                       "times", self._task)
    else:
      self._scheduler.fast_schedule(self._task, True)
    yield False
class SyncTask (BaseTask):
  """
  Task used by Synchronizer: when the scheduler runs it, it releases inlock
  (signaling the waiting thread that the scheduler has paused here) and then
  blocks on outlock, stalling the scheduler until the Synchronizer exits.
  """
  def __init__ (self, *args, **kw):
    BaseTask.__init__(self)
    self.inlock = threading.Lock()
    self.outlock = threading.Lock()
    # Both locks start held so the acquires in run() / Synchronizer block
    self.inlock.acquire()
    self.outlock.acquire()
  def run (self):
    self.inlock.release()   # signal: the scheduler has reached this task
    self.outlock.acquire()  # block the scheduler until __exit__ releases it
class Synchronizer (object):
  """
  Re-entrant context manager that pauses a recoco scheduler while held, so
  another thread can safely touch scheduler-owned state.
  """
  def __init__ (self, scheduler = None):
    if scheduler is None:
      scheduler = defaultScheduler
    self.scheduler = scheduler
    self.syncer = None
    self.enter = 0  # re-entrancy depth
  def __enter__ (self):
    self.enter += 1
    if self.enter == 1:
      # Outermost entry: start a SyncTask and wait until the scheduler
      # actually runs it (which is when the scheduler becomes blocked)
      self.syncer = SyncTask()
      self.syncer.start(self.scheduler) #NOTE: maybe add it to head of list?
      self.syncer.inlock.acquire()
    return self.syncer
  def __exit__ (self, type_, value, traceback):
    self.enter -= 1
    if self.enter == 0:
      # Outermost exit: unblock the SyncTask, resuming the scheduler
      self.syncer.outlock.release()
class Timer (Task):
"""
A simple timer.
timeToWake Amount of time to wait before calling callback (seconds)
callback Some callable to be called when the timer expires
absoluteTime A specific time to fire (as from time.time())
recurring Whether to call repeatedly or just once
args, kw Args and keyword args for the callback
scheduler The recoco scheduler to use (None means default scheduler)
started If False, requires you to call .start() to begin timer
selfStoppable If True, the callback can return False to cancel the timer
"""
def __init__ (self, timeToWake, callback, absoluteTime = False,
recurring = False, args = (), kw = {}, scheduler = None,
started = True, selfStoppable = True):
if absoluteTime and recurring:
raise RuntimeError("Can't have | |
'qiáo',
0x77A8: 'pú',
0x77A9: 'zhǔ',
0x77AA: 'dèng',
0x77AB: 'shěn',
0x77AC: 'shùn',
0x77AD: 'liǎo,liào',
0x77AE: 'chè',
0x77AF: 'xián,jiàn',
0x77B0: 'kàn',
0x77B1: 'yè',
0x77B2: 'xuè',
0x77B3: 'tóng',
0x77B4: 'wǔ,mí',
0x77B5: 'lín',
0x77B6: 'guì,kuì',
0x77B7: 'jiàn',
0x77B8: 'yè',
0x77B9: 'ài',
0x77BA: 'huì',
0x77BB: 'zhān',
0x77BC: 'jiǎn',
0x77BD: 'gǔ',
0x77BE: 'zhào',
0x77BF: 'qú,jù',
0x77C0: 'wéi',
0x77C1: 'chǒu',
0x77C2: 'sào',
0x77C3: 'nǐng,chēng',
0x77C4: 'xūn',
0x77C5: 'yào',
0x77C6: 'huò,yuè',
0x77C7: 'mēng',
0x77C8: 'mián',
0x77C9: 'pín',
0x77CA: 'mián',
0x77CB: 'lěi',
0x77CC: 'kuàng,guō',
0x77CD: 'jué',
0x77CE: 'xuān',
0x77CF: 'mián',
0x77D0: 'huò',
0x77D1: 'lú',
0x77D2: 'méng,měng',
0x77D3: 'lóng',
0x77D4: 'guàn,quán',
0x77D5: 'mǎn,mán',
0x77D6: 'xǐ',
0x77D7: 'chù',
0x77D8: 'tǎng',
0x77D9: 'kàn',
0x77DA: 'zhǔ',
0x77DB: 'máo',
0x77DC: 'jīn,qín,guān',
0x77DD: 'jīn,qín,guān',
0x77DE: 'yù,xù,jué',
0x77DF: 'shuò',
0x77E0: 'zé',
0x77E1: 'jué',
0x77E2: 'shǐ',
0x77E3: 'yǐ',
0x77E4: 'shěn',
0x77E5: 'zhī,zhì',
0x77E6: 'hóu,hòu',
0x77E7: 'shěn',
0x77E8: 'yǐng',
0x77E9: 'jǔ',
0x77EA: 'zhōu',
0x77EB: 'jiǎo,jiáo',
0x77EC: 'cuó',
0x77ED: 'duǎn',
0x77EE: 'ǎi',
0x77EF: 'jiǎo,jiáo',
0x77F0: 'zēng',
0x77F1: 'yuē',
0x77F2: 'bà',
0x77F3: 'shí,dàn',
0x77F4: 'dìng',
0x77F5: 'qì',
0x77F6: 'jī',
0x77F7: 'zǐ',
0x77F8: 'gān',
0x77F9: 'wù',
0x77FA: 'zhé',
0x77FB: 'kū',
0x77FC: 'gāng,qiāng,kòng',
0x77FD: 'xī',
0x77FE: 'fán',
0x77FF: 'kuàng',
0x7800: 'dàng',
0x7801: 'mǎ',
0x7802: 'shā',
0x7803: 'dān',
0x7804: 'jué',
0x7805: 'lì',
0x7806: 'fū',
0x7807: 'mín',
0x7808: 'è',
0x7809: 'xū,huā',
0x780A: 'kāng',
0x780B: 'zhǐ',
0x780C: 'qì,qiè',
0x780D: 'kǎn',
0x780E: 'jiè',
0x780F: 'pīn,bīn,fēn',
0x7810: 'è',
0x7811: 'yà',
0x7812: 'pī',
0x7813: 'zhé',
0x7814: 'yán,yàn',
0x7815: 'suì',
0x7816: 'zhuān',
0x7817: 'chē',
0x7818: 'dùn',
0x7819: 'wǎ',
0x781A: 'yàn',
0x781B: 'jīn',
0x781C: 'fēng',
0x781D: 'fǎ',
0x781E: 'mò',
0x781F: 'zhǎ',
0x7820: 'jū',
0x7821: 'yù',
0x7822: 'kē,luǒ',
0x7823: 'tuó',
0x7824: 'tuó',
0x7825: 'dǐ',
0x7826: 'zhài',
0x7827: 'zhēn',
0x7828: 'ě',
0x7829: 'fú,fèi',
0x782A: 'mǔ',
0x782B: 'zhù,zhǔ',
0x782C: 'lì,lā,lá',
0x782D: 'biān',
0x782E: 'nǔ',
0x782F: 'pīng',
0x7830: 'pēng',
0x7831: 'líng',
0x7832: 'pào',
0x7833: 'lè',
0x7834: 'pò',
0x7835: 'bō',
0x7836: 'pò',
0x7837: 'shēn',
0x7838: 'zá',
0x7839: 'ài',
0x783A: 'lì',
0x783B: 'lóng',
0x783C: 'tóng',
0x783D: 'yòng',
0x783E: 'lì',
0x783F: 'kuàng',
0x7840: 'chǔ',
0x7841: 'kēng',
0x7842: 'quán',
0x7843: 'zhū',
0x7844: 'kuāng,guāng',
0x7845: 'guī',
0x7846: 'è',
0x7847: 'náo',
0x7848: 'qià',
0x7849: 'lù',
0x784A: 'wěi,guì',
0x784B: 'ài',
0x784C: 'luò,gè',
0x784D: 'kèn,xiàn,gǔn,yǐn',
0x784E: 'xíng',
0x784F: 'yán,yàn',
0x7850: 'dòng',
0x7851: 'pēng,píng',
0x7852: 'xī',
0x7853: 'lǎo',
0x7854: 'hóng',
0x7855: 'shuò,shí',
0x7856: 'xiá',
0x7857: 'qiāo',
0x7858: 'qíng',
0x7859: 'wéi,wèi,ái,gài',
0x785A: 'qiáo',
0x785B: 'ceok',
0x785C: 'kēng',
0x785D: 'xiāo',
0x785E: 'què,kè,kù',
0x785F: 'chàn',
0x7860: 'láng',
0x7861: 'hōng',
0x7862: 'yù',
0x7863: 'xiāo',
0x7864: 'xiá',
0x7865: 'mǎng,bàng',
0x7866: 'luò,lòng',
0x7867: 'yǒng,tóng',
0x7868: 'chē',
0x7869: 'chè',
0x786A: 'wò',
0x786B: 'liú',
0x786C: 'yìng',
0x786D: 'máng',
0x786E: 'què',
0x786F: 'yàn',
0x7870: 'shā',
0x7871: 'kǔn',
0x7872: 'yù',
0x7873: 'ceok',
0x7874: 'huā',
0x7875: 'lǔ',
0x7876: 'chěn',
0x7877: 'jiǎn',
0x7878: 'nüè',
0x7879: 'sōng',
0x787A: 'zhuó',
0x787B: 'kēng,kěng',
0x787C: 'péng',
0x787D: 'yān,yǎn',
0x787E: 'zhuì,chuí,duǒ',
0x787F: 'kōng',
0x7880: 'chēng',
0x7881: 'qí',
0x7882: 'zòng,cóng',
0x7883: 'qìng',
0x7884: 'lín',
0x7885: 'jūn',
0x7886: 'bō',
0x7887: 'dìng',
0x7888: 'mín',
0x7889: 'diāo',
0x788A: 'jiān,zhàn',
0x788B: 'hè',
0x788C: 'lù,liù',
0x788D: 'ài',
0x788E: 'suì',
0x788F: 'què,xī',
0x7890: 'léng',
0x7891: 'bēi',
0x7892: 'yín',
0x7893: 'duì',
0x7894: 'wǔ',
0x7895: 'qí',
0x7896: 'lún,lǔn,lùn',
0x7897: 'wǎn',
0x7898: 'diǎn',
0x7899: 'náo,gāng',
0x789A: 'bèi',
0x789B: 'qì',
0x789C: 'chěn',
0x789D: 'ruǎn',
0x789E: 'yán',
0x789F: 'dié',
0x78A0: 'dìng',
0x78A1: 'zhóu',
0x78A2: 'tuó',
0x78A3: 'jié,yà',
0x78A4: 'yīng',
0x78A5: 'biǎn',
0x78A6: 'kè',
0x78A7: 'bì',
0x78A8: 'wěi,wèi',
0x78A9: 'shuò,shí',
0x78AA: 'zhēn',
0x78AB: 'duàn',
0x78AC: 'xiá',
0x78AD: 'dàng',
0x78AE: 'tí,dī',
0x78AF: 'nǎo',
0x78B0: 'pèng',
0x78B1: 'jiǎn',
0x78B2: 'dì',
0x78B3: 'tàn',
0x78B4: 'chá,chā',
0x78B5: 'tián',
0x78B6: 'qì',
0x78B7: 'dùn',
0x78B8: 'fēng',
0x78B9: 'xuàn',
0x78BA: 'què',
0x78BB: 'què,qiāo',
0x78BC: 'mǎ',
0x78BD: 'gōng',
0x78BE: 'niǎn',
0x78BF: 'sù,xiè',
0x78C0: 'é',
0x78C1: 'cí',
0x78C2: 'liú,liù',
0x78C3: 'sī,tí',
0x78C4: 'táng',
0x78C5: 'bàng,páng',
0x78C6: 'huá,kě,gū',
0x78C7: 'pī',
0x78C8: 'kuǐ,wěi',
0x78C9: 'sǎng',
0x78CA: 'lěi',
0x78CB: 'cuō',
0x78CC: 'tián',
0x78CD: 'xiá,qià,yà',
0x78CE: 'xī',
0x78CF: 'lián,qiān',
0x78D0: 'pán',
0x78D1: 'ái,wèi',
0x78D2: 'yǔn',
0x78D3: 'duī',
0x78D4: 'zhé',
0x78D5: 'kē',
0x78D6: 'lá,lā',
0x78D7: 'zhuān',
0x78D8: 'yáo',
0x78D9: 'gǔn',
0x78DA: 'zhuān',
0x78DB: 'chán',
0x78DC: 'qì',
0x78DD: 'áo,qiāo',
0x78DE: 'pēng,pèng',
0x78DF: 'liù',
0x78E0: 'lǔ',
0x78E1: 'kàn',
0x78E2: 'chuǎng',
0x78E3: 'chěn',
0x78E4: 'yīn,yǐn',
0x78E5: 'lěi,léi',
0x78E6: 'biāo',
0x78E7: 'qì',
0x78E8: 'mó,mò',
0x78E9: 'qì,zhú',
0x78EA: 'cuī',
0x78EB: 'zōng',
0x78EC: 'qìng',
0x78ED: 'chuò',
0x78EE: 'lún',
0x78EF: 'jī',
0x78F0: 'shàn',
0x78F1: 'láo,luò',
0x78F2: 'qú',
0x78F3: 'zēng',
0x78F4: 'dèng',
0x78F5: 'jiàn',
0x78F6: 'xì',
0x78F7: 'lín',
0x78F8: 'dìng',
0x78F9: 'diàn',
0x78FA: 'huáng',
0x78FB: 'pán,bō',
0x78FC: 'jí,shé',
0x78FD: 'qiāo',
0x78FE: 'dī',
0x78FF: 'lì',
0x7900: 'jiàn',
0x7901: 'jiāo',
0x7902: 'xī',
0x7903: 'zhǎng',
0x7904: 'qiáo',
0x7905: 'dūn',
0x7906: 'jiǎn',
0x7907: 'yù',
0x7908: 'zhuì',
0x7909: 'hé,qiāo,qiào',
0x790A: 'kè,huò',
0x790B: 'zé',
0x790C: 'léi,lěi',
0x790D: 'jié',
0x790E: 'chǔ',
0x790F: 'yè',
0x7910: 'què,hú',
0x7911: 'dàng',
0x7912: 'yǐ',
0x7913: 'jiāng',
0x7914: 'pī',
0x7915: 'pī',
0x7916: 'yù',
0x7917: 'pīn',
0x7918: 'è,qì',
0x7919: 'ài',
0x791A: 'kē',
0x791B: 'jiān',
0x791C: 'yù',
0x791D: 'ruǎn',
0x791E: 'méng',
0x791F: 'pào',
0x7920: 'cí',
0x7921: 'bō',
0x7922: 'yǎng',
0x7923: 'miè',
0x7924: 'cǎ',
0x7925: 'xián,xín',
0x7926: 'kuàng',
0x7927: 'léi,lěi,lèi',
0x7928: 'lěi',
0x7929: 'zhì',
0x792A: 'lì',
0x792B: 'lì',
0x792C: 'fán',
0x792D: 'què',
0x792E: 'pào',
0x792F: 'yīng',
0x7930: 'lì',
0x7931: 'lóng',
0x7932: 'lóng',
0x7933: 'mò',
0x7934: 'bó',
0x7935: 'shuāng',
0x7936: 'guàn',
0x7937: 'jiān',
0x7938: 'cǎ',
0x7939: 'yán,yǎn',
0x793A: 'shì',
0x793B: 'shì',
0x793C: 'lǐ',
0x793D: 'réng',
0x793E: 'shè',
0x793F: 'yuè',
0x7940: 'sì',
0x7941: 'qí',
0x7942: 'tā',
0x7943: 'mà',
0x7944: 'xiè',
0x7945: 'yāo',
0x7946: 'xiān',
0x7947: 'zhǐ,qí',
0x7948: 'qí',
0x7949: 'zhǐ',
0x794A: 'bēng,fāng',
0x794B: 'duì',
0x794C: 'zhòng',
0x794D: 'rèn',
0x794E: 'yī',
0x794F: 'shí',
0x7950: 'yòu',
0x7951: 'zhì',
0x7952: 'tiáo',
0x7953: 'fú',
0x7954: 'fù',
0x7955: 'mì,bì',
0x7956: 'zǔ',
0x7957: 'zhī',
0x7958: 'suàn',
0x7959: 'mèi',
0x795A: 'zuò',
0x795B: 'qū',
0x795C: 'hù',
0x795D: 'zhù',
0x795E: 'shén',
0x795F: 'suì',
0x7960: 'cí',
0x7961: 'chái',
0x7962: 'mí',
0x7963: 'lǚ',
0x7964: 'yǔ',
0x7965: 'xiáng',
0x7966: 'wú',
0x7967: 'tiāo',
0x7968: 'piào,piāo',
0x7969: 'zhù',
0x796A: 'guǐ',
0x796B: 'xiá',
0x796C: 'zhī',
0x796D: 'jì,zhài',
0x796E: 'gào',
0x796F: 'zhēn',
0x7970: 'gào',
0x7971: 'shuì,lèi',
0x7972: 'jìn',
0x7973: 'shèn',
0x7974: 'gāi',
0x7975: 'kǔn',
0x7976: 'dì',
0x7977: 'dǎo',
0x7978: 'huò',
0x7979: 'táo',
0x797A: 'qí',
0x797B: 'gù',
0x797C: 'guàn',
0x797D: 'zuì',
0x797E: 'líng',
0x797F: 'lù',
0x7980: 'bǐng',
0x7981: 'jīn,jìn',
0x7982: 'dǎo',
0x7983: 'zhí',
0x7984: 'lù',
0x7985: 'chán,shàn',
0x7986: 'bì,pí',
0x7987: 'chǔ',
0x7988: 'huī',
0x7989: 'yǒu',
0x798A: 'xì',
0x798B: 'yīn',
0x798C: 'zī',
0x798D: 'huò',
0x798E: 'zhēn',
0x798F: 'fú',
0x7990: 'yuàn',
0x7991: 'xú',
0x7992: 'xiǎn',
0x7993: 'shāng,yáng',
0x7994: 'tí,zhǐ',
0x7995: 'yī',
0x7996: 'méi',
0x7997: 'sī',
0x7998: 'dì',
0x7999: 'bèi',
0x799A: 'zhuó',
0x799B: 'zhēn',
0x799C: 'yíng',
0x799D: 'jì',
0x799E: 'gào',
0x799F: 'táng',
0x79A0: 'sī',
0x79A1: 'mà',
0x79A2: 'tà',
0x79A3: 'fù',
0x79A4: 'xuān',
0x79A5: 'qí',
0x79A6: 'yù',
0x79A7: 'xǐ',
0x79A8: 'jī,jì',
0x79A9: 'sì',
0x79AA: 'shàn,chán',
0x79AB: 'dàn',
0x79AC: 'guì',
0x79AD: 'suì',
0x79AE: 'lǐ',
0x79AF: 'nóng',
0x79B0: 'mí',
0x79B1: 'dǎo',
0x79B2: 'lì',
0x79B3: 'ráng',
0x79B4: 'yuè',
0x79B5: 'tí',
0x79B6: 'zàn',
0x79B7: 'lèi',
0x79B8: 'róu',
0x79B9: 'yǔ',
0x79BA: 'yú,yù,ǒu',
0x79BB: 'lí',
0x79BC: 'xiè',
0x79BD: 'qín',
0x79BE: 'hé',
0x79BF: 'tū',
0x79C0: 'xiù',
0x79C1: 'sī',
0x79C2: 'rén',
0x79C3: 'tū',
0x79C4: 'zǐ,zì',
0x79C5: 'chá,ná',
0x79C6: 'gǎn',
0x79C7: 'yì,zhí',
0x79C8: 'xiān',
0x79C9: 'bǐng',
0x79CA: 'nián',
0x79CB: 'qiū',
0x79CC: 'qiū',
0x79CD: 'zhǒng,zhòng,chóng',
0x79CE: 'fèn',
0x79CF: 'hào,mào',
0x79D0: 'yún',
0x79D1: 'kē',
0x79D2: 'miǎo',
0x79D3: 'zhī',
0x79D4: 'jīng',
0x79D5: 'bǐ',
0x79D6: 'zhǐ',
0x79D7: 'yù',
0x79D8: 'mì,bì',
0x79D9: 'kù,kū',
0x79DA: 'bàn',
0x79DB: 'pī',
0x79DC: 'ní,nì',
0x79DD: 'lì',
0x79DE: 'yóu',
0x79DF: 'zū',
0x79E0: 'pī',
0x79E1: 'bó',
0x79E2: 'líng',
0x79E3: 'mò',
0x79E4: 'chèng',
0x79E5: 'nián',
0x79E6: 'qín',
0x79E7: 'yāng',
0x79E8: 'zuó',
0x79E9: 'zhì',
0x79EA: 'dī',
0x79EB: 'shú',
0x79EC: 'jù',
0x79ED: 'zǐ',
0x79EE: 'huó,kuò',
0x79EF: 'jī',
0x79F0: 'chēng,chèn,chèng',
0x79F1: 'tóng',
0x79F2: 'shì,zhì',
0x79F3: 'huó,kuò',
0x79F4: 'huō',
0x79F5: 'yīn',
0x79F6: 'zī',
0x79F7: 'zhì',
0x79F8: 'jiē',
0x79F9: 'rěn',
0x79FA: 'dù',
0x79FB: 'yí',
0x79FC: 'zhū',
0x79FD: 'huì',
0x79FE: 'nóng',
0x79FF: 'fù,pū',
0x7A00: 'xī',
0x7A01: 'gǎo',
0x7A02: 'láng',
0x7A03: 'fū',
0x7A04: 'xùn,zè',
0x7A05: 'shuì',
0x7A06: 'lǚ',
0x7A07: 'kǔn',
0x7A08: 'gǎn',
0x7A09: 'jīng',
0x7A0A: 'tí',
0x7A0B: 'chéng',
0x7A0C: 'tú,shǔ',
0x7A0D: 'shāo,shào',
0x7A0E: 'shuì',
0x7A0F: 'yà',
0x7A10: 'lǔn',
0x7A11: 'lù',
0x7A12: 'gū',
0x7A13: 'zuó',
0x7A14: 'rěn',
0x7A15: 'zhùn,zhǔn',
0x7A16: 'bàng',
0x7A17: 'bài',
0x7A18: 'jī,qí',
0x7A19: 'zhī',
0x7A1A: 'zhì',
0x7A1B: 'kǔn',
0x7A1C: 'léng,lēng,líng',
0x7A1D: 'péng',
0x7A1E: 'kē',
0x7A1F: 'bǐng',
0x7A20: 'chóu',
0x7A21: 'zuì,zú,sū',
0x7A22: 'yù',
0x7A23: 'sū',
0x7A24: 'lüè',
0x7A25: 'xiāng',
0x7A26: 'yī',
0x7A27: | |
self.object,
nazwa_urzadzenia = 'Czujnik objętości cieczy',
nr_urzadzenia = 'x',
opis_czynnosci = 'Legalizacja ponowna',
jednostka_dozorowa = 'OUM w Zielonej Górze',
data_ostatniej_czynnosci = now,
nr_decyzji = 'Świadectwo Legalizacji ponownej znak wniosku: WZ4.400.864.18.2018',
data_najblizszej_czynnosci = now,
osoba_odpowiedzialna_za_nadzor = '<NAME>',
uwagi = 'brak'
)
PrzegladyTechniczne.objects.create(
obiektk = self.object,
nazwa_urzadzenia = 'Odmierzacz paliw typ Helix 6000 model C(NH/LM)33-33 nr',
nr_urzadzenia = '...',
opis_czynnosci = 'Przegląd stanu technicznego odmierzacza',
jednostka_kontrolujaca = '<NAME>',
data_ostatniej_czynnosci = now,
nr_protokolu = 'x',
data_najblizszej_czynnosci = now,
osoba_odpowiedzialna_za_nadzor = '<NAME>',
uwagi = 'brak'
)
PrzegladyTechniczne.objects.create(
obiektk = self.object,
nazwa_urzadzenia = 'Odmierzacz paliw typ Helix 6000 model C(NH/LM)33-33 nr',
nr_urzadzenia = '...',
opis_czynnosci = 'badanie VRS',
jednostka_kontrolujaca = 'GST Grzegorz Staszak',
data_ostatniej_czynnosci = now,
nr_protokolu = 'x',
data_najblizszej_czynnosci = now,
osoba_odpowiedzialna_za_nadzor = '<NAME>',
uwagi = 'brak'
)
PrzegladyTechniczne.objects.create(
obiektk = self.object,
nazwa_urzadzenia = 'Odmierzacz paliw typ Helix 6000 model C(NH/LM)33-33 nr',
nr_urzadzenia = '...',
opis_czynnosci = 'Pomiary instalacji elektrycznej, odgromowej oraz badania oporności węży',
jednostka_kontrolujaca = 'Baza Paliw Sp. z o.o.',
data_ostatniej_czynnosci = now,
nr_protokolu = 'x',
data_najblizszej_czynnosci = now,
osoba_odpowiedzialna_za_nadzor = '<NAME>',
uwagi = 'brak'
)
return super().form_valid(form)
class DodajPawilonCreateView(LoginRequiredMixin, generic.CreateView):
    """Create an ObiektK pre-configured as a 'Pawilon TRIP FREE' pavilion.

    On a valid form the new object is attached to the Miejsce from the URL
    and a default technical-inspection record is seeded for it.
    """
    form_class = ObiektKForm
    model = ObiektK
    success_url = '/dodane/'
    template_name = "baza/dodaj-obiekt-initial.html"
    initial = {
        'nazwa': 'Pawilon TRIP FREE',
        'dane_techniczne': 'brak'
    }

    def form_valid(self, form):
        """Attach the place, save the object, then seed its inspection row."""
        place = Miejsce.objects.get(pk=self.kwargs['miejsce_id'])
        form.instance.miejsce = place
        self.object = form.save()
        now = datetime.datetime.now()
        # Seed the default ventilation-duct inspection record.
        PrzegladyTechniczne.objects.create(
            obiektk=self.object,
            nazwa_urzadzenia='instalacja wentylacji grawitacyjnej',
            nr_urzadzenia='brak',
            opis_czynnosci='okresowa kontrola stanu przewodów wentylacyjnych',
            jednostka_kontrolujaca='Zakład kominiarski <NAME>',
            data_ostatniej_czynnosci=now,
            nr_protokolu='63/10/17',
            data_najblizszej_czynnosci=now,
            osoba_odpowiedzialna_za_nadzor='<NAME>',
            uwagi='brak'
        )
        return super().form_valid(form)
class DodajZbiornikCreateView(LoginRequiredMixin, generic.CreateView):
    """Create an ObiektK pre-configured as a ZP-50 storage tank.

    On a valid form the new object is attached to the Miejsce from the URL
    and the default approval (UDT) and technical-inspection records are
    seeded for it.  The seven near-identical ``create`` calls of the
    original were collapsed into data-driven loops.
    """
    form_class = ObiektKForm
    model = ObiektK
    success_url = '/dodane/'
    template_name = "baza/dodaj-obiekt-initial.html"
    initial = {
        'nazwa': 'Zbiornik magazynowy ZP- 50 nr fabr. .....',
        'dane_techniczne': 'Zbiornik dwupłaszczowy poj. 50 m3, dwukomorowy (25 m3 Pb 98, 25 m3 ON) prod. Metalchem Kościan nr fabr. 98015 nr ew. N2726000097 rok budowy 1998. Zbiornik wyposażony w przerywacze płomieni PPK-50 prod. Limet o nr 25,26/1994, Zawory oddechowe EKO ZO-50 prod. LIMET nr 21,22/1994. Przestrzeń między płaszczowa monitorowana detektorem wycieku LAG-14 ER prod. Afriso. System kontrolno pomiarowy SITE SENTINEL 2'
    }

    # Default UDT approval records: (nr_urzadzenia, opis_czynnosci).
    _DOPUSZCZENIA = [
        ('nr', 'rewizja wewnetrzna'),
        ('...', 'prewizja zewnetrzna'),
    ]

    # Default inspection records:
    # (nazwa_urzadzenia, nr_urzadzenia, opis_czynnosci,
    #  jednostka_kontrolujaca, uwagi).
    _PRZEGLADY = [
        ('<NAME>', '...',
         'oględziny i pomiary instalacji elektrycznej',
         'UE Benedykt Brenk',
         'obejmują oględziny puszek, połączeń ekwipotencjalnych, zadławień przewodów'),
        ('System pomiarowy Site Sentinel system pomiarowy tzw. mokry', 'b/d',
         'okresowa kontrola stanu technicznego urzadzenia',
         'Petromarketing Sp. z o.o. tel. 601 533 997',
         'brak'),
        ('Zawór oddechowy EKO ZO 50', '...',
         'przegląd techniczny',
         '<NAME>',
         'brak'),
        ('przerywacz płowmienia PPK-50', '...',
         'przegląd techniczny urzadzenia',
         '<NAME>',
         'brak'),
        ('przyłacze oparów UNIMAT', '...',
         'przegląd techniczny urzadzenia',
         '<NAME>',
         'brak'),
    ]

    def form_valid(self, form):
        """Attach the place, save the object, then seed its default records."""
        miejsce = Miejsce.objects.get(pk=self.kwargs['miejsce_id'])
        form.instance.miejsce = miejsce
        self.object = form.save()
        now = datetime.datetime.now()
        for nr, opis in self._DOPUSZCZENIA:
            DopuszczeniaLegalizacje.objects.create(
                obiektk=self.object,
                nazwa_urzadzenia=self.object.nazwa,
                nr_urzadzenia=nr,
                opis_czynnosci=opis,
                jednostka_dozorowa='UDT',
                data_ostatniej_czynnosci=now,
                nr_decyzji='b/d',
                data_najblizszej_czynnosci=now,
                osoba_odpowiedzialna_za_nadzor='<NAME>',
                uwagi='brak'
            )
        for nazwa, nr, opis, jednostka, uwagi in self._PRZEGLADY:
            PrzegladyTechniczne.objects.create(
                obiektk=self.object,
                nazwa_urzadzenia=nazwa,
                nr_urzadzenia=nr,
                opis_czynnosci=opis,
                jednostka_kontrolujaca=jednostka,
                data_ostatniej_czynnosci=now,
                nr_protokolu='b/d',
                data_najblizszej_czynnosci=now,
                osoba_odpowiedzialna_za_nadzor='<NAME>',
                uwagi=uwagi
            )
        return super().form_valid(form)
@login_required
def dodaj_obiekt(request, miejsce_id):
    """Add a new ObiektK under the Miejsce identified by *miejsce_id*.

    Valid POST -> create and redirect to /dodane/; invalid POST ->
    redirect to /niedodane/; GET -> render an empty form.
    """
    if request.method != 'POST':
        return render(request, 'baza/dodaj_obiekt.html', {'form': ObiektKForm()})
    form = ObiektKForm(request.POST)
    if not form.is_valid():
        return HttpResponseRedirect('/niedodane/')
    ObiektK.objects.create(
        miejsce=Miejsce.objects.get(pk=miejsce_id),
        nazwa=form.cleaned_data['nazwa'],
        dane_techniczne=form.cleaned_data['dane_techniczne'])
    return HttpResponseRedirect('/dodane/')
@login_required
def dodaj_dopuszczenie(request, miejsce_id, obiekt_id):
    """Add a DopuszczeniaLegalizacje record to the ObiektK *obiekt_id*.

    An invalid POST re-renders the page with the bound form so field
    errors are shown; a GET renders an empty form.
    """
    form = (DopuszczeniaLegalizacjeForm(request.POST)
            if request.method == 'POST' else DopuszczeniaLegalizacjeForm())
    if request.method == 'POST' and form.is_valid():
        fields = (
            'nazwa_urzadzenia', 'nr_urzadzenia', 'opis_czynnosci',
            'jednostka_dozorowa', 'data_ostatniej_czynnosci', 'nr_decyzji',
            'data_najblizszej_czynnosci', 'osoba_odpowiedzialna_za_nadzor',
            'uwagi')
        values = {name: form.cleaned_data[name] for name in fields}
        DopuszczeniaLegalizacje.objects.create(
            obiektk=ObiektK.objects.get(pk=obiekt_id), **values)
        return HttpResponseRedirect('/dodane/')
    return render(request, 'baza/dodaj_dopuszczenia.html', {'form': form})
@login_required
def dodaj_przeglad(request, miejsce_id, obiekt_id):
    """Add a PrzegladyTechniczne record to the ObiektK *obiekt_id*.

    Now requires login, consistent with the sibling ``dodaj_dopuszczenie``
    view (the original was the only mutation view without the decorator).
    An invalid POST re-renders the page with the bound form; a GET
    renders an empty form.
    """
    if request.method == 'POST':
        form = PrzegladyTechniczneForm(request.POST)
        if form.is_valid():
            obiektk = ObiektK.objects.get(pk=obiekt_id)
            PrzegladyTechniczne.objects.create(
                obiektk=obiektk,
                nazwa_urzadzenia=form.cleaned_data['nazwa_urzadzenia'],
                nr_urzadzenia=form.cleaned_data['nr_urzadzenia'],
                opis_czynnosci=form.cleaned_data['opis_czynnosci'],
                jednostka_kontrolujaca=form.cleaned_data['jednostka_kontrolujaca'],
                data_ostatniej_czynnosci=form.cleaned_data['data_ostatniej_czynnosci'],
                nr_protokolu=form.cleaned_data['nr_protokolu'],
                data_najblizszej_czynnosci=form.cleaned_data['data_najblizszej_czynnosci'],
                osoba_odpowiedzialna_za_nadzor=form.cleaned_data['osoba_odpowiedzialna_za_nadzor'],
                uwagi=form.cleaned_data['uwagi'])
            return HttpResponseRedirect('/dodane/')
    else:
        form = PrzegladyTechniczneForm()
    return render(request, 'baza/dodaj_przeglad.html', {'form': form})
# legacy views below -------------------------------------------
def dodajobiekt(request):
    """Legacy view: create an Obiekt from an ObiektForm submission."""
    if request.method != 'POST':
        return render(request, 'baza/dodajobiekt.html', {'form': ObiektForm()})
    form = ObiektForm(request.POST)
    if not form.is_valid():
        return HttpResponseRedirect('/')
    Obiekt.objects.create(
        typ=form.cleaned_data['typ'],
        nazwa=form.cleaned_data['nazwa'],
        lokalizacja=form.cleaned_data['lokalizacja'],
        nr=form.cleaned_data['nr'],
        wytyczne=form.cleaned_data['wytyczne'])
    return HttpResponseRedirect('/dodane/')
def dodaj_urzadzenie(request):
    """Legacy view: pick a station type, then list matching stations to
    attach a device to."""
    if request.method == 'POST':
        form = SzukajObiektForm(request.POST)
        if not form.is_valid():
            return HttpResponseRedirect('/')
        matching = Obiekt.objects.filter(typ=form.cleaned_data['typ'])
        return render(request, 'baza/okresl_stacje_dla_urzadzenia.html',
                      {'obiekty': matching})
    return render(request, 'baza/wybierz_stacje_dla_urzadzenia.html',
                  {'form': SzukajObiektForm()})
def dodajurzadzenie(request):
    """Legacy view: pick a station type, then list matching stations."""
    if request.method == 'POST':
        form = SzukajObiektForm(request.POST)
        if not form.is_valid():
            return HttpResponseRedirect('/')
        matching = Obiekt.objects.filter(typ=form.cleaned_data['typ'])
        return render(request, 'baza/okreslobiekt.html', {'obiekty': matching})
    return render(request, 'baza/dobierzobiekt.html', {'form': SzukajObiektForm()})
def dodane(request):
    """Static confirmation page shown after a successful add."""
    return render(request, 'baza/dodane.html')
def usuniete(request):
    """Static confirmation page shown after a successful delete."""
    return render(request, 'baza/usuniete.html')
def niedodane(request):
    """Static error page shown when adding an item failed."""
    return render(request, 'baza/niedodane.html')
def rafal(request):
    """Static page (template 'baza/rafal.html')."""
    return render(request, 'baza/rafal.html')
def signin(request):
    """Static sign-in page."""
    return render(request, 'baza/signin.html')
def szukajobiekt(request):
    """Legacy view: search for Obiekt rows of a chosen type."""
    if request.method == 'POST':
        form = SzukajObiektForm(request.POST)
        if not form.is_valid():
            return HttpResponseRedirect('/')
        matching = Obiekt.objects.filter(typ=form.cleaned_data['typ'])
        return render(request, 'baza/wybierzobiekt.html', {'obiekty': matching})
    return render(request, 'baza/szukajobiekt.html', {'form': SzukajObiektForm()})
def dodaj_stacje(request):
    """Legacy view: create a station (Obiekt) from an ObiektForm submission."""
    if request.method != 'POST':
        return render(request, 'baza/dodaj_stacje.html', {'form': ObiektForm()})
    form = ObiektForm(request.POST)
    if not form.is_valid():
        return HttpResponseRedirect('/niedodane/')
    Obiekt.objects.create(
        typ=form.cleaned_data['typ'],
        nazwa=form.cleaned_data['nazwa'],
        lokalizacja=form.cleaned_data['lokalizacja'],
        nr=form.cleaned_data['nr'],
        wytyczne=form.cleaned_data['wytyczne'])
    return HttpResponseRedirect('/dodane/')
def dodaj_okt(request, stacja_id):
    """Legacy view: add an Urzadzenie to the station (Obiekt) *stacja_id*."""
    stacja = Obiekt.objects.get(pk=stacja_id)
    if request.method == 'POST':
        form = UrzadzenieForm(request.POST)
        if not form.is_valid():
            return HttpResponseRedirect('/niedodane/')
        Urzadzenie.objects.create(
            obiekt=stacja,
            nazwa=form.cleaned_data['nazwa'],
            lokalizacja=form.cleaned_data['lokalizacja'],
            nr=form.cleaned_data['nr'],
            wytyczne=form.cleaned_data['wytyczne'])
        return HttpResponseRedirect('/dodane/')
    return render(request, 'baza/dodaj_obiekt.html',
                  {'nazwa_stacji': stacja.nazwa, 'form': UrzadzenieForm()})
def szukaj(request):
    """List every station (Obiekt of type 'stacja') for browsing."""
    # Fetch all stations and hand them to the template for rendering.
    stations = Obiekt.objects.filter(typ='stacja')
    return render(request, 'baza/szukaj.html', {'obiekty': stations})
def stacja(request, stacja_id):
    """Legacy view: show/edit one station and list its devices."""
    station = Obiekt.objects.get(pk=stacja_id)
    form = StacjaForm(instance=station)
    devices = Urzadzenie.objects.filter(obiekt=station)
    if request.method == 'POST':
        form = StacjaForm(request.POST, instance=station)
        if not form.is_valid():
            return HttpResponseRedirect('/niedodane/')
        form.save()
        return HttpResponseRedirect('/dodane/')
    return render(request, 'baza/stacja.html',
                  {'stacja': station, 'form': form, 'obiekty': devices})
def obit(request, stacja_id, obiekt_id):
    """Legacy view: show/edit one device (Urzadzenie) and list its items."""
    station = Obiekt.objects.get(pk=stacja_id)
    device = Urzadzenie.objects.get(pk=obiekt_id)
    form = UrzadzenieForm(instance=device)
    items = Przedmiot.objects.filter(urzadzenie=device)
    if request.method == 'POST':
        form = UrzadzenieForm(request.POST, instance=device)
        if not form.is_valid():
            return HttpResponseRedirect('/niedodane/')
        form.save()
        return HttpResponseRedirect('/dodane/')
    return render(request, 'baza/obiekt.html',
                  {'stacja': station, 'obiekt': device, 'form': form,
                   'urzadzenia': items})
def urzadzenie(request, stacja_id, obiekt_id, urzadzenie_id):
    """Legacy view: show/edit a single item (Przedmiot) of a device."""
    station = Obiekt.objects.get(pk=stacja_id)
    device = Urzadzenie.objects.get(pk=obiekt_id)
    # Renamed local so it no longer shadows this view function's name.
    item = Przedmiot.objects.get(pk=urzadzenie_id)
    form = PrzedmiotForm(instance=item)
    if request.method == 'POST':
        form = PrzedmiotForm(request.POST, instance=item)
        if not form.is_valid():
            return HttpResponseRedirect('/niedodane/')
        form.save()
        return HttpResponseRedirect('/dodane/')
    return render(request, 'baza/urzadzenie.html',
                  {'stacja': station, 'obiekt': device, 'form': form})
def szukajurzadzenie(request):
if request.method | |
import logging
import click
import os
import traceback
import json
import sys
from aeternity import __version__
from aeternity.epoch import EpochClient
from aeternity.config import Config, MAX_TX_TTL, ConfigException
# from aeternity.oracle import Oracle, OracleQuery, NoOracleResponse
from aeternity.signing import KeyPair
from aeternity.contract import Contract
from aeternity.aens import AEName
from datetime import datetime, timezone
# Plain, message-only log lines: the CLI uses logging for user output.
logging.basicConfig(format='%(message)s', level=logging.INFO)
# Keys under which CLI-level settings are stashed in the click context obj.
CTX_EPOCH_URL = 'EPOCH_URL'
CTX_EPOCH_URL_INTERNAL = 'EPOCH_URL_INTERNAL'
CTX_EPOCH_URL_WEBSOCKET = 'EPOCH_URL_WEBSOCKET'
CTX_KEY_PATH = 'KEY_PATH'
CTX_VERBOSE = 'VERBOSE'
CTX_QUIET = 'QUIET'
CTX_AET_DOMAIN = 'AET_NAME'
def _epoch_cli():
    """Build an EpochClient configured from the URLs in the click context.

    Exits the process with status 1 on a configuration error.
    """
    ctx = click.get_current_context()
    try:
        # Install the CLI-selected node URLs as the default configuration.
        cfg = Config(
            external_url=ctx.obj.get(CTX_EPOCH_URL),
            internal_url=ctx.obj.get(CTX_EPOCH_URL_INTERNAL),
            websocket_url=ctx.obj.get(CTX_EPOCH_URL_WEBSOCKET),
        )
        Config.set_defaults(cfg)
    except ConfigException as e:
        print("Configuration error: ", e)
        exit(1)
    # load the epoch client
    return EpochClient()
def _keypair(password=None):
    """
    Load the wallet keypair from the key file recorded in the click context.

    Prompts for the wallet password when *password* is None.
    :return: (keypair, absolute key-file path); exits the process on error
    """
    ctx = click.get_current_context()
    kf = ctx.obj.get(CTX_KEY_PATH)
    if not os.path.exists(kf):
        # Fixed typo in the user-facing message ("exits" -> "exist").
        print(f'Key file {kf} does not exist.')
        exit(1)
    try:
        if password is None:
            password = click.prompt("Enter the wallet password", default='', hide_input=True)
        return KeyPair.read_from_private_key(kf, password), os.path.abspath(kf)
    except Exception:
        # NOTE(review): any failure here (corrupt key file included) is
        # reported as a bad password — consider narrowing this handler.
        print("Invalid password")
        exit(1)
def _check_prefix(data, prefix):
"""
helper method to check the validity of a prefix
"""
if len(data) < 3:
print("Invalid input, likely you forgot to escape the $ sign (use \\$)")
exit(1)
if not data.startswith(f"{prefix}$"):
if prefix == 'ak':
print("Invalid account address, it shoudld be like: ak$....")
if prefix == 'th':
print("Invalid transaction hash, it shoudld be like: th$....")
if prefix == 'bh':
print("Invalid block hash, it shoudld be like: bh$....")
exit(1)
def _verbose():
    """Return True when the --verbose flag was passed to the CLI."""
    return click.get_current_context().obj.get(CTX_VERBOSE, False)
def _print(header, value):
    """Print *value* under a 53-char underscore-padded *header* line."""
    banner = header.ljust(53, '_')
    print(f" {banner} \n\n{value}\n")
def _pp(data, title=None, prefix=''):
    """
    pretty printer
    :param data: a single key-value tuple or a list of key-value tuples
    :param title: optional title printed before the data
    :param prefix: string prepended to each label (used for indentation)
    """
    ctx = click.get_current_context()
    if title is not None:
        print(title)
    # normalise a single tuple to a one-element list
    if not isinstance(data, list):
        data = [data]
    for kv in data:
        value = kv[1] if kv[1] is not None else 'N/A'
        if isinstance(value, list):
            value = ', '.join(value)
        # in --quiet mode only the values are printed, without labels
        if ctx.obj.get(CTX_QUIET, False):
            print(value)
        else:
            label = f"{prefix}{kv[0]}"
            print(f"{label.ljust(30, '_')} {value}")
def _ppe(error):
    """Pretty-print an error; append the traceback when --verbose is set."""
    ctx = click.get_current_context()
    print(error)
    # Default to False so a traceback only appears with --verbose,
    # consistent with _verbose() and the flag's default (was True).
    if ctx.obj.get(CTX_VERBOSE, False):
        traceback.print_exc()
def _p_block(block, title=None):
    """Print the summary of a block and, when present, its transactions."""
    if title is not None:
        print(title)
    has_txs = hasattr(block, 'transactions')
    # Block timestamps are in milliseconds since the epoch.
    when = datetime.fromtimestamp(block.time / 1000, timezone.utc).isoformat('T')
    summary = [
        ('Block hash', block.hash),
        ('Block height', block.height),
        ('State hash', block.state_hash),
        ('Miner', block.miner if hasattr(block, 'miner') else 'N/A'),
        ('Time', when),
        ('Previous block hash', block.prev_hash),
        ('Transactions', len(block.transactions) if has_txs else 0),
    ]
    _pp(summary)
    if has_txs:
        for tx in block.transactions:
            inner = tx.get('tx', {})
            _pp(('Tx Hash', tx.get('hash')), prefix='> ')
            _pp(('Signatures', tx.get('signatures')), prefix=' ')
            _pp(('Sender', inner.get('sender')), prefix=' ')
            _pp(('Recipient', inner.get('recipient')), prefix=' ')
            _pp(('Amount', inner.get('amount')), prefix=' ')
def _p_tx(tx):
    """Print the summary of a single transaction."""
    inner = tx.get('tx', {})
    _pp([
        ('Block hash', tx.get('block_hash')),
        ('Block height', tx.get('block_height')),
        ('Signatures', tx.get('signatures')),
        ('Sender account', inner.get('sender')),
        ('Recipient account', inner.get('recipient')),
        ('Amount', inner.get('amount')),
        ('TTL', inner.get('ttl')),
    ])
# Commands
# Shared click settings: make both -h and --help show command help.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# the priority for the url selection is PARAM, ENV, DEFAULT
@click.group()
@click.pass_context
@click.option('--url', '-u', default='https://sdk-testnet.aepps.com', envvar='EPOCH_URL', help='Epoch node url', metavar='URL')
@click.option('--url-internal', '-i', default='https://sdk-testnet.aepps.com/internal', envvar='EPOCH_URL_INTERNAL', metavar='URL')
@click.option('--url-websocket', '-w', default='ws://sdk-testnet.aepps.com', envvar='EPOCH_URL_WEBSOCKET', metavar='URL')
@click.option('--quiet', '-q', default=False, is_flag=True, help='Print only results')
@click.option('--verbose', '-v', is_flag=True, default=False, help='Print verbose data')
# Single version option; the original also had a bare duplicate
# @click.version_option() which registered --version twice.
@click.version_option(version=__version__)
def cli(ctx, url, url_internal, url_websocket, quiet, verbose):
    """
    Welcome to the aecli client.
    The client is to interact with an epoch node.
    """
    # Stash the CLI-level settings in the context for the subcommands.
    ctx.obj[CTX_EPOCH_URL] = url
    ctx.obj[CTX_EPOCH_URL_INTERNAL] = url_internal
    ctx.obj[CTX_EPOCH_URL_WEBSOCKET] = url_websocket
    ctx.obj[CTX_QUIET] = quiet
    ctx.obj[CTX_VERBOSE] = verbose
@cli.command('config', help="Print the client configuration")
@click.pass_context
def config(ctx):
    """Show the node URLs the client is currently configured to use."""
    settings = [
        ("Epoch URL", ctx.obj.get(CTX_EPOCH_URL)),
        ("Epoch internal URL", ctx.obj.get(CTX_EPOCH_URL_INTERNAL, 'N/A')),
        ("Epoch websocket URL", ctx.obj.get(CTX_EPOCH_URL_WEBSOCKET, 'N/A')),
    ]
    _pp(settings, title="aecli settings")
# __ __ _ _ _
# \ \ / / | | | | |
# \ \ /\ / /_ _| | | ___| |_ ___
# \ \/ \/ / _` | | |/ _ \ __/ __|
# \ /\ / (_| | | | __/ |_\__ \
# \/ \/ \__,_|_|_|\___|\__|___/
#
#
@cli.group(help="Handle wallet operations")
@click.pass_context
@click.argument('key_path', default='sign_key', envvar='WALLET_SIGN_KEY_PATH')
def wallet(ctx, key_path):
    # Record the wallet key-file path for all wallet subcommands.
    ctx.obj[CTX_KEY_PATH] = key_path
@wallet.command('create', help="Create a new wallet")
@click.pass_context
@click.option('--password', default=None, help="Set a password from the command line [WARN: this method is not secure]")
# Fixed typo in the help text ("exising" -> "existing").
@click.option('--force', default=False, is_flag=True, help="Overwrite existing keys without asking")
def wallet_create(ctx, password, force):
    """Generate a fresh keypair and store it in a password-protected file."""
    kp = KeyPair.generate()
    kf = ctx.obj.get(CTX_KEY_PATH)
    # Refuse to clobber an existing key file unless --force was given.
    if not force and os.path.exists(kf):
        click.confirm(f'Key file {kf} already exists, overwrite?', abort=True)
    if password is None:
        password = click.prompt("Enter the wallet password", default='', hide_input=True)
    kp.save_to_file(kf, password)
    _pp([
        ('Wallet address', kp.get_address()),
        ('Wallet path', os.path.abspath(kf))
    ], title='Wallet created')
@wallet.command('save', help='Save a private keys string to a password protected file wallet')
@click.argument("private_key")
@click.pass_context
def wallet_save(ctx, private_key):
    # Import a raw private-key string into the password-protected key file.
    try:
        keypair = KeyPair.from_private_key_string(private_key)
        key_file = ctx.obj.get(CTX_KEY_PATH)
        if os.path.exists(key_file):
            click.confirm(f'Key file {key_file} already exists, overwrite?', abort=True)
        password = click.prompt("Enter the wallet password", default='', hide_input=True)
        keypair.save_to_file(key_file, password)
        _pp([
            ('Wallet address', keypair.get_address()),
            ('Wallet path', os.path.abspath(key_file)),
        ], title='Wallet saved')
    except Exception as e:
        _ppe(e)
@wallet.command('address', help="Print the wallet address (public key)")
@click.option('--password', default=None, help="Read the password from the command line [WARN: this method is not secure]")
@click.option('--private-key', is_flag=True, help="Print the private key instead of the account address")
def wallet_address(password, private_key):
    # Show the wallet address; with --private-key the key is printed first.
    keypair, _path = _keypair(password=password)
    if private_key:
        _pp([("Private key", keypair.get_private_key())])
    _pp([('Wallet address', keypair.get_address())])
@wallet.command('balance', help="Get the balance of a wallet")
@click.option('--password', default=None, help="Read the password from the command line [WARN: this method is not secure]")
def wallet_balance(password):
    # Query the chain for the balance of the local wallet's address.
    keypair, _ = _keypair(password=password)
    try:
        _pp(("Account balance", _epoch_cli().get_balance(keypair.get_address())))
    except Exception as e:
        _ppe(e)
@wallet.command('spend', help="Create a transaction to another wallet")
@click.argument('recipient_account', required=True)
@click.argument('amount', required=True, default=1)
@click.option('--ttl', default=MAX_TX_TTL, help="Validity of the spend transaction in number of blocks (default forever)")
# Fixed: the original had a `<PASSWORD>` placeholder as the default, which is
# not valid Python; None matches the other --password options in this file.
@click.option('--password', default=None, help="Read the password from the command line [WARN: this method is not secure]")
def wallet_spend(recipient_account, amount, ttl, password):
    """Transfer *amount* tokens from the local wallet to *recipient_account*."""
    kp, _ = _keypair(password=password)
    try:
        # The recipient must be an account address ("ak$...").
        _check_prefix(recipient_account, "ak")
        data = _epoch_cli().spend(kp, recipient_account, amount, tx_ttl=ttl)
        _pp([
            ("Transaction hash", data.tx_hash),
            ("Sender account", kp.get_address()),
            ("Recipient account", recipient_account),
        ], title='Transaction posted to the chain')
    except Exception as e:
        _ppe(e)
# _ _
# | \ | |
# | \| | __ _ _ __ ___ ___ ___
# | . ` |/ _` | '_ ` _ \ / _ \/ __|
# | |\ | (_| | | | | | | __/\__ \
# |_| \_|\__,_|_| |_| |_|\___||___/
#
#
@wallet.group(help="Handle name lifecycle")
@click.argument('domain')
@click.pass_context
def name(ctx, domain):
    # Record the target domain for the name subcommands.
    ctx.obj[CTX_AET_DOMAIN] = domain
@name.command('claim', help="Claim a domain name")
@click.option("--name-ttl", default=100, help='Lifetime of the claim in blocks (default 100)')
@click.option("--ttl", default=100, help='Lifetime of the claim request in blocks (default 100)')
@click.pass_context
def name_register(ctx, name_ttl, ttl):
    # Claim the context-stored domain on behalf of the local wallet.
    try:
        domain = ctx.obj.get(CTX_AET_DOMAIN)
        kp, _ = _keypair()
        name = AEName(domain, client=_epoch_cli())
        name.update_status()
        # Only an unclaimed domain can be claimed.
        if name.status != AEName.Status.AVAILABLE:
            print("Domain not available")
            exit(0)
        tx = name.full_claim_blocking(kp, name_ttl=name_ttl, tx_ttl=ttl)
        _pp([("Transaction hash", tx.tx_hash)], title=f"Name {domain} claimed")
    except Exception as e:
        _ppe(e)
@name.command('update')
@click.pass_context
@click.argument('address')
@click.option("--name-ttl", default=100, help='Lifetime of the claim in blocks (default 100)')
@click.option("--ttl", default=100, help='Lifetime of the claim request in blocks (default 100)')
def name_update(ctx, address, name_ttl, ttl):
    """
    Update a name pointer
    """
    # retrieve the domain from the context
    domain = ctx.obj.get(CTX_AET_DOMAIN)
    # retrieve the keypair
    kp, _ = _keypair()
    # Pass the client explicitly, consistent with the claim command.
    name = AEName(domain, client=_epoch_cli())
    name.update_status()
    if name.status != AEName.Status.CLAIMED:
        # Fixed copy-pasted message: this command updates, not transfers.
        print(f"Domain is {name.status} and cannot be updated")
        exit(0)
    tx = name.update(kp, target=address, name_ttl=name_ttl, tx_ttl=ttl)
    _pp([
        ('Transaction hash', tx.tx_hash)
    ], title=f"Name {domain} status {name.status}")
@name.command('revoke')
@click.pass_context
def name_revoke(ctx):
    # retrieve the domain from the context
    domain = ctx.obj.get(CTX_AET_DOMAIN)
    # retrieve the keypair
    kp, _ = _keypair()
    # Pass the client explicitly, consistent with the claim command.
    name = AEName(domain, client=_epoch_cli())
    name.update_status()
    if name.status == AEName.Status.AVAILABLE:
        print("Domain is available, nothing to revoke")
        exit(0)
    tx = name.revoke(kp)
    _pp([
        ('Transaction hash', tx.tx_hash)
    ], title=f"Name {domain} status {name.status}")
@name.command('transfer')
@click.pass_context
@click.argument('address')
def name_transfer(ctx, address):
    """
    Transfer a name to another account
    """
    # retrieve the domain from the context
    domain = ctx.obj.get(CTX_AET_DOMAIN)
    # retrieve the keypair
    kp, _ = _keypair()
    # Pass the client explicitly, consistent with the claim command.
    name = AEName(domain, client=_epoch_cli())
    name.update_status()
    if name.status != AEName.Status.CLAIMED:
        print(f"Domain is {name.status} and cannot be transferred")
        exit(0)
    tx = name.transfer_ownership(kp, address)
    _pp([
        ('Transaction hash', tx.tx_hash)
    ], title=f"Name {domain} status {name.status}")
# ____ _
# / __ \ | |
# | | | |_ __ __ _ ___| | ___ ___
# | | | | '__/ _` |/ __| |/ _ \/ __|
# | |__| | | | (_| | (__| | __/\__ \
# \____/|_| \__,_|\___|_|\___||___/
#
#
@cli.group(help="Interact with oracles")
def oracle():
    """Container group for the oracle subcommands."""


@oracle.command('register')
def oracle_register():
    # TODO: oracle registration is not implemented yet.
    print("register oracle")


@oracle.command('query')
def oracle_query():
    # TODO: oracle querying is not implemented yet.
    print("query oracle")
# _____ _ _
# / ____| | | | |
# | | ___ _ __ | |_ _ __ __ _ ___| |_ ___
# | | / _ \| '_ \| __| '__/ _` |/ __| __/ __|
# | |___| (_) | | | | |
# Source repository: AdvaitDhingra/vector (scikit-hep)
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import typing
def _recname(is_momentum: bool, dimension: int) -> str:
name = "Momentum" if is_momentum else "Vector"
return f"{name}{dimension}D"
def _check_names(
    projectable: typing.Any, fieldnames: typing.List[str]
) -> typing.Tuple[bool, int, typing.List[str], typing.Any]:
    """
    Classify record field names into vector coordinates.

    Returns ``(is_momentum, dimension, names, columns)`` where ``names`` are the
    canonical coordinate names and ``columns`` the matching projections of
    ``projectable``. Momentum aliases (px/py/pt/pz/E/e/energy/M/m/mass) mark the
    result as a momentum vector.

    NOTE: mutates ``fieldnames`` in place, removing every recognized coordinate;
    leftover names are passed through untouched at the end. The branch order
    below is significant: each alias branch sets ``is_momentum`` *before* the
    duplicate check, which selects ``complaint1`` over ``complaint2``.
    """
    complaint1 = "duplicate coordinates (through momentum-aliases): " + ", ".join(
        repr(x) for x in fieldnames
    )
    complaint2 = (
        "unrecognized combination of coordinates, allowed combinations are:\n\n"
        "    (2D) x= y=\n"
        "    (2D) rho= phi=\n"
        "    (3D) x= y= z=\n"
        "    (3D) x= y= theta=\n"
        "    (3D) x= y= eta=\n"
        "    (3D) rho= phi= z=\n"
        "    (3D) rho= phi= theta=\n"
        "    (3D) rho= phi= eta=\n"
        "    (4D) x= y= z= t=\n"
        "    (4D) x= y= z= tau=\n"
        "    (4D) x= y= theta= t=\n"
        "    (4D) x= y= theta= tau=\n"
        "    (4D) x= y= eta= t=\n"
        "    (4D) x= y= eta= tau=\n"
        "    (4D) rho= phi= z= t=\n"
        "    (4D) rho= phi= z= tau=\n"
        "    (4D) rho= phi= theta= t=\n"
        "    (4D) rho= phi= theta= tau=\n"
        "    (4D) rho= phi= eta= t=\n"
        "    (4D) rho= phi= eta= tau="
    )
    is_momentum = False
    dimension = 0
    names = []
    columns = []
    # ---- 2D base pairs -------------------------------------------------------
    if "x" in fieldnames and "y" in fieldnames:
        if dimension != 0:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 2
        names.extend(["x", "y"])
        columns.extend([projectable["x"], projectable["y"]])
        fieldnames.remove("x")
        fieldnames.remove("y")
    if "rho" in fieldnames and "phi" in fieldnames:
        if dimension != 0:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 2
        names.extend(["rho", "phi"])
        columns.extend([projectable["rho"], projectable["phi"]])
        fieldnames.remove("rho")
        fieldnames.remove("phi")
    # ---- 2D momentum-alias pairs (mixed and pure) ----------------------------
    if "x" in fieldnames and "py" in fieldnames:
        is_momentum = True
        if dimension != 0:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 2
        names.extend(["x", "y"])
        columns.extend([projectable["x"], projectable["py"]])
        fieldnames.remove("x")
        fieldnames.remove("py")
    if "px" in fieldnames and "y" in fieldnames:
        is_momentum = True
        if dimension != 0:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 2
        names.extend(["x", "y"])
        columns.extend([projectable["px"], projectable["y"]])
        fieldnames.remove("px")
        fieldnames.remove("y")
    if "px" in fieldnames and "py" in fieldnames:
        is_momentum = True
        if dimension != 0:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 2
        names.extend(["x", "y"])
        columns.extend([projectable["px"], projectable["py"]])
        fieldnames.remove("px")
        fieldnames.remove("py")
    if "pt" in fieldnames and "phi" in fieldnames:
        is_momentum = True
        if dimension != 0:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 2
        names.extend(["rho", "phi"])
        columns.extend([projectable["pt"], projectable["phi"]])
        fieldnames.remove("pt")
        fieldnames.remove("phi")
    # ---- third (longitudinal) coordinate: requires an established 2D base ----
    if "z" in fieldnames:
        if dimension != 2:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 3
        names.append("z")
        columns.append(projectable["z"])
        fieldnames.remove("z")
    if "theta" in fieldnames:
        if dimension != 2:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 3
        names.append("theta")
        columns.append(projectable["theta"])
        fieldnames.remove("theta")
    if "eta" in fieldnames:
        if dimension != 2:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 3
        names.append("eta")
        columns.append(projectable["eta"])
        fieldnames.remove("eta")
    if "pz" in fieldnames:
        is_momentum = True
        if dimension != 2:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 3
        names.append("z")
        columns.append(projectable["pz"])
        fieldnames.remove("pz")
    # ---- fourth (temporal) coordinate: requires an established 3D base -------
    if "t" in fieldnames:
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("t")
        columns.append(projectable["t"])
        fieldnames.remove("t")
    if "tau" in fieldnames:
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("tau")
        columns.append(projectable["tau"])
        fieldnames.remove("tau")
    # energy aliases all map to canonical "t"
    if "E" in fieldnames:
        is_momentum = True
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("t")
        columns.append(projectable["E"])
        fieldnames.remove("E")
    if "e" in fieldnames:
        is_momentum = True
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("t")
        columns.append(projectable["e"])
        fieldnames.remove("e")
    if "energy" in fieldnames:
        is_momentum = True
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("t")
        columns.append(projectable["energy"])
        fieldnames.remove("energy")
    # mass aliases all map to canonical "tau"
    if "M" in fieldnames:
        is_momentum = True
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("tau")
        columns.append(projectable["M"])
        fieldnames.remove("M")
    if "m" in fieldnames:
        is_momentum = True
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("tau")
        columns.append(projectable["m"])
        fieldnames.remove("m")
    if "mass" in fieldnames:
        is_momentum = True
        if dimension != 3:
            raise TypeError(complaint1 if is_momentum else complaint2)
        dimension = 4
        names.append("tau")
        columns.append(projectable["mass"])
        fieldnames.remove("mass")
    # no recognized 2D base at all -> unrecognized combination
    if dimension == 0:
        raise TypeError(complaint1 if is_momentum else complaint2)
    # pass any unrecognized leftover fields through unchanged
    for name in fieldnames:
        names.append(name)
        columns.append(projectable[name])
    return is_momentum, dimension, names, columns
def Array(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
    """
    Constructs an Awkward Array of vectors, whose type is determined by the fields
    of the record array (which may be nested within lists or other non-record structures).

    All allowed signatures for ``ak.Array`` can be used in this function.

    The array must contain records with the following combinations of field names:

    - (2D) ``x``, ``y``
    - (2D) ``rho``, ``phi``
    - (3D) ``x``, ``y``, ``z``
    - (3D) ``x``, ``y``, ``theta``
    - (3D) ``x``, ``y``, ``eta``
    - (3D) ``rho``, ``phi``, ``z``
    - (3D) ``rho``, ``phi``, ``theta``
    - (3D) ``rho``, ``phi``, ``eta``
    - (4D) ``x``, ``y``, ``z``, ``t``
    - (4D) ``x``, ``y``, ``z``, ``tau``
    - (4D) ``x``, ``y``, ``theta``, ``t``
    - (4D) ``x``, ``y``, ``theta``, ``tau``
    - (4D) ``x``, ``y``, ``eta``, ``t``
    - (4D) ``x``, ``y``, ``eta``, ``tau``
    - (4D) ``rho``, ``phi``, ``z``, ``t``
    - (4D) ``rho``, ``phi``, ``z``, ``tau``
    - (4D) ``rho``, ``phi``, ``theta``, ``t``
    - (4D) ``rho``, ``phi``, ``theta``, ``tau``
    - (4D) ``rho``, ``phi``, ``eta``, ``t``
    - (4D) ``rho``, ``phi``, ``eta``, ``tau``

    in which

    - ``px`` may be substituted for ``x``
    - ``py`` may be substituted for ``y``
    - ``pt`` may be substituted for ``rho``
    - ``pz`` may be substituted for ``z``
    - ``E`` may be substituted for ``t``
    - ``e`` may be substituted for ``t``
    - ``energy`` may be substituted for ``t``
    - ``M`` may be substituted for ``tau``
    - ``m`` may be substituted for ``tau``
    - ``mass`` may be substituted for ``tau``

    to make the vector a momentum vector.

    No constraints are placed on the types of the vector fields, though if they
    are not numbers, mathematical operations will fail. Usually, you want them to be
    integers or floating-point numbers.
    """
    import builtins  # this module shadows `zip` below; need the real builtin

    import awkward
    import vector
    import vector._backends.awkward_  # noqa: 401

    akarray = awkward.Array(*args, **kwargs)
    fields = awkward.fields(akarray)
    is_momentum, dimension, names, arrays = _check_names(akarray, fields)
    # Attach vector behaviors to the first column only (once per call) unless
    # the behaviors were already registered globally on import.
    needs_behavior = not vector._awkward_registered
    for x in arrays:
        if needs_behavior:
            if x.behavior is None:
                x.behavior = vector._backends.awkward_.behavior
            else:
                x.behavior = dict(x.behavior)
                x.behavior.update(vector._backends.awkward_.behavior)
        else:
            x.behavior = None
        needs_behavior = False
    assert 2 <= dimension <= 4, f"Dimension must be between 2-4, not {dimension}"
    # NOTE: `builtins.zip` replaces the previous `__builtins__["zip"]` lookup,
    # which fails when `__builtins__` is a module (e.g. in the __main__ module)
    # rather than a dict.
    return awkward.zip(
        dict(builtins.zip(names, arrays)),
        depth_limit=akarray.layout.purelist_depth,
        with_name=_recname(is_momentum, dimension),
    )
def zip(
arrays: typing.Dict[str, typing.Any], depth_limit: typing.Optional[int] = None
) -> typing.Any:
"""
Constructs an Awkward Array of vectors, whose type is determined by the fields
of the record array (which may be nested within lists or other non-record structures).
This function accepts a subset of ``ak.zip``'s arguments.
Args:
arrays (dict of str to array-like): Arrays, lists, etc. to zip together.
Unlike ``ak.zip``, this must be a dict with string keys to determine
the coordinate system of the arrays; it may not be a tuple.
depth_limit (None or int): If None, attempt to fully broadcast the
``array`` to all levels. If an int, limit the number of dimensions
that get broadcasted. The minimum value is ``1``, for no broadcasting.
The array must contain records with the following combinations of field names:
- (2D) ``x``, ``y``
- (2D) ``rho``, ``phi``
- (3D) ``x``, ``y``, ``z``
- (3D) ``x``, ``y``, ``theta``
- (3D) ``x``, ``y``, ``eta``
- (3D) ``rho``, ``phi``, ``z``
- (3D) ``rho``, ``phi``, ``theta``
- (3D) ``rho``, ``phi``, ``eta``
- (4D) ``x``, ``y``, ``z``, ``t``
- (4D) ``x``, ``y``, ``z``, ``tau```
- (4D) ``x``, ``y``, ``theta``, ``t```
- (4D) ``x``, ``y``, ``theta``, ``tau```
- (4D) ``x``, ``y``, ``eta``, ``t```
- (4D) ``x``, ``y``, ``eta``, ``tau```
- (4D) ``rho``, ``phi``, ``z``, ``t```
- (4D) ``rho``, ``phi``, ``z``, ``tau```
- (4D) ``rho``, ``phi``, ``theta``, ``t```
- (4D) ``rho``, ``phi``, ``theta``, ``tau```
- (4D) ``rho``, ``phi``, ``eta``, ``t```
- (4D) ``rho``, ``phi``, ``eta``, ``tau```
in which
- ``px`` may be substituted for ``x``
- ``py`` may be substituted for ``y``
- ``pt`` may be substituted for ``rho``
- ``pz`` may be substituted for ``z``
- ``E`` may be | |
except AttributeError as err:
raise ImportError(f'Module {module_path} does not define a {class_name} attribute/class') from err
def get_attr_if_exists(obj: Any, attr: str) -> Union[object, None]:
    """Return ``getattr(obj, attr)`` if the attribute exists, else ``None``.

    Uses getattr's default to avoid the hasattr+getattr double lookup.
    """
    return getattr(obj, attr, None)
def time_format(seconds: float, format_='%H:%M:%S') -> Union[str, float]:
    """
    Format a duration in seconds; negative durations gain a leading '-'.
    NaN input is returned unchanged.

    Default format is '%H:%M:%S'

    >>> time_format(3600)
    '01:00:00'
    """
    if seconds != seconds:  # NaN is the only value that is not equal to itself
        return seconds  # NaN
    formatted = time.strftime(format_, time.gmtime(abs(seconds)))
    return f"-{formatted}" if seconds < 0 else formatted
def fill(value: Union[list, str, tuple], max_size, with_=' ') -> Any:
    """
    Pad ``value`` on the right with ``with_`` until it reaches ``max_size``.

    Strings get a new padded string; lists are extended in place; tuples get a
    new padded tuple. Values already at or beyond ``max_size`` are unchanged.
    """
    fill_values = [with_] * (max_size - len(value))
    if isinstance(value, str):
        # join with NO separator: the previous ' '.join inserted stray spaces
        # and produced strings longer than max_size
        value = f"{value}{''.join(fill_values)}"
    elif isinstance(value, list):
        value += fill_values
    elif isinstance(value, tuple):
        value += tuple(fill_values)
    return value
def list_methods(klass) -> List[str]:
    """Public callable attribute names of ``klass`` (no underscore-prefixed names)."""
    return [
        attr_name
        for attr_name in dir(klass)
        if not attr_name.startswith('_') and callable(getattr(klass, attr_name))
    ]
def string_to_literal(val: Union[str, bytes]):
    """Best-effort parse of a Python literal; return ``val`` unchanged on failure.

    Only the exceptions ``ast.literal_eval`` documents are swallowed -- the
    previous bare ``except:`` also caught ``KeyboardInterrupt``/``SystemExit``.
    """
    if isinstance(val, (str, bytes)):
        try:
            return ast.literal_eval(val)
        except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
            pass
    return val
def module_references(instance: types.ModuleType, **kwargs) -> dict:
    """
    dict of all functions and classes defined in the module.

    To also list the variables it is necessary to define explicitly with the special
    variable on your module ``_include``.

    **kwargs:
        _include -> to includes any definition and variables
        _exclude -> to exclude any definition

    :param instance: module instance to inspect
    :return: dict mapping names to objects
    """
    assert isinstance(instance, types.ModuleType), "You need to submit a module instance."
    logger.debug(f"Checking module {instance.__name__}")
    # Hoisted out of the loop: these lookups are invariant per call, but were
    # previously recomputed for every name in dir(instance).
    exclude = get_attr_if_exists(instance, "_exclude") or kwargs.get("_exclude") or []
    include = get_attr_if_exists(instance, "_include") or kwargs.get("_include") or []
    definitions = {}
    for i in dir(instance):
        if i.startswith('_'):
            continue
        obj = get_attr_if_exists(instance, i)
        if i in include:
            definitions[i] = obj
        # keep only callables actually defined in this module (not re-exports)
        if obj is not None and i not in exclude and callable(obj):
            if obj.__module__ == instance.__name__:
                definitions[i] = obj
    logger.debug(f"Collected: {definitions}")
    return definitions
def install_if_not(lib_name: str):
    """Import ``lib_name``; if it is missing, pip-install it with the current interpreter."""
    from ..display import console
    try:
        importlib.import_module(lib_name)
        # NOTE(review): "Alredy" is a typo in user-visible output; left unchanged
        # here because fixing it changes runtime text.
        output = 'Alredy Installed'
    except ImportError:
        from ..system.commons import run_on_terminal
        # install into the exact interpreter running this code
        command_ = f"{sys.executable} -m pip install {lib_name}"
        output = run_on_terminal(command_)
    console.log(output)
def set_log_level(level: Union[int, str]):
    """
    Set the root logger's level (name or numeric value accepted).

    Default log level is INFO
    CRITICAL = 50
    FATAL = CRITICAL
    ERROR = 40
    WARNING = 30
    WARN = WARNING
    INFO = 20
    DEBUG = 10
    NOTSET = 0
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    logger.info(f"Update log level to {level}")
def logger_level():
    """Return the current numeric level of the root logger."""
    import logging
    root_logger = logging.getLogger()
    return root_logger.level
def combine_with_all(a: list, b: list, n_a_combinations: int = 1, is_random: bool = False) -> List[Tuple[Any, ...]]:
    """
    Cartesian product of (combinations of ``a``) with ``b``.

    >>> a = [1, 2, 3]
    >>> b = ['anything_a', 'anything_b']
    >>> combine_with_all(a, b)
    [(1, 'anything_a'), (1, 'anything_b'), (2, 'anything_a'), (2, 'anything_b'), (3, 'anything_a'), (3, 'anything_b')]
    >>> combine_with_all(a, b, n_a_combinations=2)
    [((1, 2), 'anything_a'), ((1, 2), 'anything_b'),
    ((1, 3), 'anything_a'), ((1, 3), 'anything_b'),
    ((2, 3), 'anything_a'), ((2, 3), 'anything_b')]
    """
    if not isinstance(n_a_combinations, int):
        raise TypeError(f"Please send {int}.")
    # clamp to len(a); negative values are taken by absolute value
    if n_a_combinations > len(a):
        n_a_combinations = len(a)
    else:
        n_a_combinations = abs(n_a_combinations)
    left_side = itertools.combinations(a, n_a_combinations) if n_a_combinations > 1 else a
    pairs = list(itertools.product(left_side, b))
    if is_random:
        random.shuffle(pairs)
    return pairs
class CjTest(object):
    """
    Attribute-assertion helper and unittest-skeleton generator.

    Wraps an object instance; each public attribute becomes an ``_Attr`` proxy
    on which comparison operators record expectations. ``check_attr`` /
    ``check_all`` then assert the recorded expectations against the live
    instance. ``build_test`` emits a unittest source skeleton for a module,
    class, or function.
    """
    # Source templates used by build_test() to emit unittest skeletons.
    # NOTE(review): the exact leading whitespace inside these template strings
    # was mangled in transit; reconstructed to plausible indentation -- confirm
    # against upstream before relying on generated formatting.
    __template_unittest_function = """
    def test_{func_name}(self):
        pass
"""
    __template_unittest_class = """
class {class_name}Test(unittest.TestCase):
{func_tests}
"""
    __template_unittest = """import unittest

{tests}

if __name__ == '__main__':
    unittest.main()
"""
    __prefix_attr_err = "Attr Check Error {attr_}."

    def __init__(self, instance_obj: object):
        # attribute proxies are stored under a per-class prefix to avoid
        # colliding with this helper's own attributes
        self._prefix_attr = f"__{instance_obj.__class__.__name__}__"
        self._instance_obj = instance_obj
        self._set_attr_current_values()
        self._checks = []
        self._n_checks_passed = 0

    @property
    def checks(self):
        # registered _Attr expectations awaiting check_all()
        return self._checks

    @property
    def n_checks(self):
        return len(self._checks)

    @property
    def _instance_obj_attrs(self):
        # public attributes of the wrapped instance (no dunder/mangled names)
        return filter(lambda attr_: attr_.__contains__('__') is False, dir(self._instance_obj))

    def _get_attr_obj(self, attr_: str):
        # wrap the instance's current attribute value in an _Attr proxy
        if not hasattr(self._instance_obj, attr_):
            raise ValueError(f"Attr {attr_} not found.")
        value = getattr(self._instance_obj, attr_)
        return self._Attr(attr_, value)

    def _set_attr_current_values(self):
        # snapshot every public attribute as an _Attr proxy on self
        for attr_ in self._instance_obj_attrs:
            attr_obj = self._get_attr_obj(attr_)
            attr_name = self.parse_attr(attr_)
            setattr(self, attr_name, attr_obj)

    def parse_attr(self, attr_: str):
        # prefixed internal name under which the proxy is stored
        attr_ = self._valid_attr(attr_)
        return f'{self._prefix_attr}{attr_}'

    def __getattr__(self, item):
        # expose proxies under their plain attribute names
        return self.__getattribute__(self.parse_attr(item))

    class _Attr(object):
        """Proxy for one attribute; comparison operators record an expectation."""

        def __init__(self, name: str, value: Any):
            self.name = name
            self.is_callable = callable(value)
            self.is_private = self.name.startswith('_')
            self.is_bool = value is True or value is False
            self.is_class = isinstance(value, ClassType)
            self.is_function = isinstance(value, FunctionType)
            self.class_of_attr = value.__class__
            self._operator_repr = None
            # (expected, comparison function, operator symbol) once recorded
            self.tests_case = []

        def __repr__(self):
            return f"{self.name}"

        def __str__(self):
            return f"{self.name}"

        def __len__(self):
            return len(self.tests_case)

        def __eq__(self, other):
            """ ==value """
            if isinstance(other, self.__class__):
                return NotImplemented
            self.tests_case = (other, self.class_of_attr.__eq__, '==')
            return self

        def __ge__(self, other):
            """>=value """
            if isinstance(other, self.__class__):
                return NotImplemented
            self.tests_case = (other, self.class_of_attr.__ge__, '>=')
            return self

        def __gt__(self, other):
            """>value """
            if isinstance(other, self.__class__):
                return NotImplemented
            self.tests_case = (other, self.class_of_attr.__gt__, '>')
            return self

        def __le__(self, other):
            """ <=value. """
            if isinstance(other, self.__class__):
                return NotImplemented
            self.tests_case = (other, self.class_of_attr.__le__, '<=')
            return self

        def __lt__(self, other):
            """ <value. """
            if isinstance(other, self.__class__):
                return NotImplemented
            self.tests_case = (other, self.class_of_attr.__lt__, '<')
            return self

        def __ne__(self, other):
            """ !=value. """
            if isinstance(other, self.__class__):
                return NotImplemented
            self.tests_case = (other, self.class_of_attr.__ne__, '!=')
            return self

        def copy(self):
            return copy(self)

        def run(self, current_value):
            # evaluate the recorded expectation; return failure messages (if any)
            expected, operator, _ = self.tests_case
            if not operator(current_value, expected):
                return [f"{repr(current_value)} not {_} {repr(expected)}"]
            return []

    def _valid_attr(self, attr_name: str):
        assert hasattr(self._instance_obj,
                       attr_name), f"{self.__prefix_attr_err.format(attr_=repr(attr_name))} isn't defined."
        return attr_name

    def add_check(self, *check_: _Attr):
        # register expectations, skipping duplicates
        for i in check_:
            if i not in self._checks:
                self._checks.append(i)

    def remove_check(self, index: int):
        self._checks.pop(index)

    def check_attr(self, attr_name: Union[str, _Attr]):
        # resolve a name to its stored proxy, then assert its expectation
        if isinstance(attr_name, str):
            stored_test = self.__getattribute__(self.parse_attr(attr_name))
        else:
            stored_test = attr_name
        current_value = getattr(self._instance_obj, stored_test.name)
        if not stored_test.is_callable:
            tests_ = stored_test.run(current_value)
            passed = not any(tests_)
            self._n_checks_passed += len(stored_test) - len(tests_)
            msg_err = f"{self.__prefix_attr_err.format(attr_=repr(stored_test.name))} {' '.join(tests_)}"
            assert passed, msg_err

    def check_all(self):
        for attr_ in self._checks:
            self.check_attr(attr_)

    @classmethod
    def _get_class_test(cls, ref):
        # one test stub per public method of the class
        func_tests = ''.join(cls.__template_unittest_function.format(func_name=i) for i in list_methods(ref))
        return cls.__template_unittest_class.format(class_name=ref.__name__, func_tests=func_tests)

    @classmethod
    def _get_func_test(cls, ref):
        return cls.__template_unittest_function.format(func_name=ref.__name__)

    @classmethod
    def _get_test(cls, ref):
        if isinstance(ref, (FunctionType, types.MethodType)):
            return cls._get_func_test(ref)
        if isinstance(ref, type):
            return cls._get_class_test(ref)
        raise TypeError("send a function or class reference")

    @classmethod
    def build_test(cls, reference):
        """Return unittest skeleton source for a module, class, or function."""
        module_func_test = []
        tests = []
        if isinstance(reference, types.ModuleType):
            for _, ref in module_references(reference).items():
                if isinstance(ref, type):
                    tests.append(cls._get_test(ref))
                    continue
                module_func_test.append(cls._get_test(ref))
        else:
            if isinstance(reference, type):
                tests.append(cls._get_test(reference))
            else:
                module_func_test.append(cls._get_test(reference))
        if module_func_test:
            # loose functions are grouped under a synthetic "Module" test class
            module_func_test = ''.join(module_func_test)
            tests = [cls.__template_unittest_class.format(class_name='Module', func_tests=module_func_test)] + tests
        return cls.__template_unittest.format(tests='\n'.join(tests))
def _add_license(base_dir, ext='.py'):
    """Prepend the project license header to files under ``base_dir`` that lack it."""
    from cereja.file import FileIO
    from cereja.config import BASE_DIR
    # NOTE(review): this loads BASE_DIR itself as the licence file -- presumably
    # FileIO.load resolves a directory to the repo's LICENSE; TODO confirm.
    licence_file = FileIO.load(BASE_DIR)
    for file in FileIO.load_files(base_dir, ext=ext, recursive=True):
        if 'Copyright (c) 2019 The Cereja Project' in file.string:
            continue  # header already present
        file.insert('"""\n' + licence_file.string + '\n"""')
        file.save(exist_ok=True)
def _rescale_down(input_list, size):
assert len(input_list) >= size, f'{len(input_list), size}'
skip = len(input_list) // size
for n, i in enumerate(range(0, len(input_list), skip), start=1):
if n > size:
break
yield input_list[i]
def _rescale_up(values, k, fill_with=None, filling='inner'):
    """
    Stretch ``values`` up to length ``k`` by repeating items (or ``fill_with``).

    filling='pre'  -> all padding emitted before the originals
    filling='post' -> all padding emitted after the originals
    filling='inner' (default) -> clones interleaved between originals
    NOTE(review): indentation of the clone loop was ambiguous in transit;
    structure reconstructed -- confirm against upstream.
    """
    size = len(values)
    assert size <= k, f'Error while resizing: {size} < {k}'
    # copies of each element needed to reach k, plus leftover slots
    clones = (math.ceil(abs(size - k) / size))
    refill_values = abs(k - size * clones)
    if filling == 'pre':
        for i in range(abs(k - size)):
            yield fill_with if fill_with is not None else values[0]
    for value in values:
        # guarantees that the original value will be returned
        yield value
        if filling != 'inner':
            continue
        for i in range(clones - 1):  # -1 because last line.
            # value original or fill_with.
            yield fill_with if fill_with is not None else value
            if refill_values > 0:
                refill_values -= 1
                yield fill_with if fill_with is not None else value
            k -= 1
            if k < 0:
                break
    if filling == 'post':
        for i in range(abs(k - size)):
            yield fill_with if fill_with is not None else values[-1]
def _interpolate(values, k):
    """Linearly resample ``values`` to ``k`` points; endpoints are preserved."""
    if isinstance(values, list):
        from ..array import Matrix
        # because true_div ...  (Matrix supports elementwise arithmetic)
        values = Matrix(values)
    size = len(values)
    first_position = 0
    last_position = size - 1
    # fractional index step between consecutive output samples
    step = (last_position - first_position) / (k - 1)
    positions = [first_position]
    previous_position = positions[-1]
    for _ in range(k - 2):
        positions.append(previous_position + step)
        previous_position = positions[-1]
    positions.append(last_position)
    for position in positions:
        previous_position = math.floor(position)
        next_position = math.ceil(position)
        if previous_position == next_position:
            # exact sample index: no blending needed
            yield values[previous_position]
        else:
            # linear blend between the two bracketing samples
            delta = position - previous_position
            yield values[previous_position] + (values[next_position] - values[previous_position]) / (
                    next_position - previous_position) * delta
def rescale_values(values: List[Any], granularity: int, interpolation: bool = False, fill_with=None, filling='inner') -> \
List[Any]:
"""
Resizes a list of values
eg.
>>> import cereja as cj
>>> cj.rescale_values(values=list(range(100)),granularity=12)
[0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88]
>>> cj.rescale_values(values=list(range(5)),granularity=10)
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
>>> cj.rescale_values(values=list(range(5)),granularity=10, filling='pre')
[0, 0, 0, 0, 0, 0, 1, 2, 3, 4]
>>> | |
# (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
''' This module contains:
* all low-level code for extracting, linking and unlinking packages
* a very simple CLI
These API functions have argument names referring to:
dist: canonical package name (e.g. 'numpy-1.6.2-py26_0')
pkgs_dir: the "packages directory" (e.g. '/opt/anaconda/pkgs' or
'/home/joe/envs/.pkgs')
prefix: the prefix of a particular environment, which may also
be the "default" environment (i.e. sys.prefix),
but is otherwise something like '/opt/anaconda/envs/foo',
or even any prefix, e.g. '/home/joe/myenv'
Also, this module is directly invoked by the (self extracting (sfx)) tarball
installer to create the initial environment, therefore it needs to be
standalone, i.e. not import any other parts of `conda` (only depend on
the standard library).
'''
from __future__ import print_function, division, absolute_import
import errno
import json
import logging
import os
import shlex
import shutil
import stat
import subprocess
import sys
import tarfile
import time
import traceback
from os.path import abspath, basename, dirname, isdir, isfile, islink, join, relpath
try:
    from conda.lock import Locked
except ImportError:
    # Make sure this still works as a standalone script for the Anaconda
    # installer.
    class Locked(object):
        # No-op context manager standing in for conda's inter-process lock.
        def __init__(self, *args, **kwargs):
            pass
        def __enter__(self):
            pass
        def __exit__(self, exc_type, exc_value, traceback):
            pass
# True on native Windows builds; gates all win32-specific link handling below.
on_win = bool(sys.platform == 'win32')

if on_win:
    import ctypes
    from ctypes import wintypes

    # kernel32 hard-link primitive (os.link is unreliable on older Pythons/Windows)
    CreateHardLink = ctypes.windll.kernel32.CreateHardLinkW
    CreateHardLink.restype = wintypes.BOOL
    CreateHardLink.argtypes = [wintypes.LPCWSTR, wintypes.LPCWSTR,
                               wintypes.LPVOID]
    try:
        CreateSymbolicLink = ctypes.windll.kernel32.CreateSymbolicLinkW
        CreateSymbolicLink.restype = wintypes.BOOL
        CreateSymbolicLink.argtypes = [wintypes.LPCWSTR, wintypes.LPCWSTR,
                                       wintypes.DWORD]
    except AttributeError:
        # symlinks unavailable (pre-Vista kernel32)
        CreateSymbolicLink = None

    def win_hard_link(src, dst):
        "Equivalent to os.link, using the win32 CreateHardLink call."
        if not CreateHardLink(dst, src, None):
            raise OSError('win32 hard link failed')

    def win_soft_link(src, dst):
        "Equivalent to os.symlink, using the win32 CreateSymbolicLink call."
        if CreateSymbolicLink is None:
            raise OSError('win32 soft link not supported')
        # third argument: 1 when linking to a directory, 0 for files
        if not CreateSymbolicLink(dst, src, isdir(src)):
            raise OSError('win32 soft link failed')
log = logging.getLogger(__name__)
stdoutlog = logging.getLogger('stdoutlog')
class NullHandler(logging.Handler):
    """Backport of Python 2.7's logging.NullHandler.

    Prevents the `No handlers could be found for logger "patch"` warning
    (http://bugs.python.org/issue16539) by discarding every record.
    """

    def handle(self, record):
        pass

    def emit(self, record):
        pass

    def createLock(self):
        # no records are ever emitted, so no lock is required
        self.lock = None
log.addHandler(NullHandler())  # silence "no handlers could be found" warnings

# Link strategies accepted by _link(); plain identifiers, not bit flags.
LINK_HARD = 1
LINK_SOFT = 2
LINK_COPY = 3
# human-readable names for log/error messages
link_name_map = {
    LINK_HARD: 'hard-link',
    LINK_SOFT: 'soft-link',
    LINK_COPY: 'copy',
}
def _link(src, dst, linktype=LINK_HARD):
    """Create ``dst`` from ``src`` using the requested link strategy."""
    if linktype == LINK_HARD:
        if on_win:
            win_hard_link(src, dst)
        else:
            os.link(src, dst)
    elif linktype == LINK_SOFT:
        if on_win:
            win_soft_link(src, dst)
        else:
            os.symlink(src, dst)
    elif linktype == LINK_COPY:
        # copy relative symlinks as symlinks
        if not on_win and islink(src) and not os.readlink(src).startswith('/'):
            os.symlink(os.readlink(src), dst)
        else:
            shutil.copy2(src, dst)
    else:
        raise Exception("Did not expect linktype=%r" % linktype)
def _remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def warn_failed_remove(function, path, exc_info):
    """shutil.rmtree onerror callback: log the failure and keep going."""
    error_number = exc_info[1].errno
    if error_number == errno.EACCES:
        log.warn("Cannot remove, permission denied: {0}".format(path))
    elif error_number == errno.ENOTEMPTY:
        log.warn("Cannot remove, not empty: {0}".format(path))
    else:
        log.warn("Cannot remove, unknown reason: {0}".format(path))
def rm_rf(path, max_retries=5, trash=True):
    """
    Completely delete path
    max_retries is the number of times to retry on failure. The default is
    5. This only applies to deleting a directory.
    If removing path fails and trash is True, files will be moved to the trash directory.
    """
    if islink(path) or isfile(path):
        # Note that we have to check if the destination is a link because
        # exists('/path/to/dead-link') will return False, although
        # islink('/path/to/dead-link') is True.
        if os.access(path, os.W_OK):
            os.unlink(path)
        else:
            log.warn("Cannot remove, permission denied: {0}".format(path))
    elif isdir(path):
        for i in range(max_retries):
            try:
                shutil.rmtree(path, ignore_errors=False, onerror=warn_failed_remove)
                return
            except OSError as e:
                msg = "Unable to delete %s\n%s\n" % (path, e)
                if on_win:
                    # Windows: retry with read-only clearing, then shell out to rd
                    try:
                        shutil.rmtree(path, onerror=_remove_readonly)
                        return
                    except OSError as e1:
                        msg += "Retry with onerror failed (%s)\n" % e1
                    p = subprocess.Popen(['cmd', '/c', 'rd', '/s', '/q', path],
                                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    (stdout, stderr) = p.communicate()
                    if p.returncode != 0:
                        msg += '%s\n%s\n' % (stdout, stderr)
                else:
                    if not isdir(path):
                        return
                    if trash:
                        try:
                            # NOTE(review): move_path_to_trash is not defined in
                            # this view -- presumably provided elsewhere in the
                            # module; confirm.
                            move_path_to_trash(path)
                            if not isdir(path):
                                return
                        except OSError as e2:
                            raise
                            # NOTE(review): unreachable -- the bare `raise` above
                            # exits before this line runs; the raise likely should
                            # have been removed or placed after the msg update.
                            msg += "Retry with onerror failed (%s)\n" % e2
                log.debug(msg + "Retrying after %s seconds..." % i)
                time.sleep(i)
        # Final time. pass exceptions to caller.
        shutil.rmtree(path, ignore_errors=False, onerror=warn_failed_remove)
def rm_empty_dir(path):
    """
    Remove ``path`` if it is an empty directory.

    A missing or non-empty directory is silently ignored.
    """
    try:
        os.rmdir(path)
    except OSError:
        # directory might not exist or not be empty
        pass
def yield_lines(path):
    """Yield stripped, non-empty, non-comment lines of the text file at ``path``.

    The file handle is closed deterministically via a context manager (the
    previous ``for line in open(path)`` leaked the handle until GC).
    """
    with open(path) as fi:
        for line in fi:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            yield line
# Sentinel path baked into relocatable packages at build time; replaced with
# the real install prefix by update_prefix().
prefix_placeholder = ('/opt/anaconda1anaconda2'
                      # this is intentionally split into parts,
                      # such that running this program on itself
                      # will leave it unchanged
                      'anaconda3')
def read_has_prefix(path):
    """
    reads `has_prefix` file and return dict mapping filenames to
    tuples(placeholder, mode)
    """
    res = {}
    try:
        for line in yield_lines(path):
            try:
                tokens = [tok.strip('"\'') for tok in shlex.split(line, posix=False)]
                placeholder, mode, f = tokens
            except ValueError:
                # legacy single-field format: assume default placeholder, text mode
                res[line] = (prefix_placeholder, 'text')
            else:
                res[f] = (placeholder, mode)
    except IOError:
        pass
    return res
class PaddingError(Exception):
    """Raised by binary_replace when the replacement is longer than the placeholder."""
    pass
def binary_replace(data, a, b):
    """
    Replace the placeholder ``a`` with ``b`` inside NUL-terminated strings of
    ``data``, padding with NUL bytes so the total length is unchanged.
    All input arguments are expected to be bytes objects.
    Raises PaddingError when ``b`` is longer than ``a``.
    """
    import re

    def _pad_replace(match):
        hits = match.group().count(a)
        pad_len = (len(a) - len(b)) * hits
        if pad_len < 0:
            raise PaddingError(a, b, pad_len)
        return match.group().replace(a, b) + b'\0' * pad_len

    pattern = re.compile(re.escape(a) + b'([^\0]*?)\0')
    result = pattern.sub(_pad_replace, data)
    assert len(result) == len(data)
    return result
def update_prefix(path, new_prefix, placeholder=prefix_placeholder,
                  mode='text'):
    """
    Rewrite the embedded ``placeholder`` prefix in the file at ``path`` to
    ``new_prefix``.

    mode='text' does a plain byte substitution; mode='binary' uses
    binary_replace() to keep the file length constant. Exits the process on an
    unknown mode. File permissions are preserved; the file is removed before
    rewriting so hard-linked package-cache copies are not clobbered.
    """
    if on_win and (placeholder != prefix_placeholder) and ('/' in placeholder):
        # original prefix uses unix-style path separators
        # replace with unix-style path separators
        new_prefix = new_prefix.replace('\\', '/')
    path = os.path.realpath(path)
    with open(path, 'rb') as fi:
        data = fi.read()
    if mode == 'text':
        new_data = data.replace(placeholder.encode('utf-8'),
                                new_prefix.encode('utf-8'))
    elif mode == 'binary':
        new_data = binary_replace(data, placeholder.encode('utf-8'),
                                  new_prefix.encode('utf-8'))
    else:
        # Fixed: the original 'sys.exit("Invalid mode:" % mode)' raised
        # TypeError (no conversion specifier) instead of exiting with a message.
        sys.exit("Invalid mode: %s" % mode)
    if new_data == data:
        return
    st = os.lstat(path)
    os.remove(path)  # Remove file before rewriting to avoid destroying hard-linked cache.
    with open(path, 'wb') as fo:
        fo.write(new_data)
    os.chmod(path, stat.S_IMODE(st.st_mode))
def name_dist(dist):
    """Return the package-name portion of a canonical dist string (e.g. 'numpy')."""
    parts = dist.rsplit('-', 2)
    return parts[0]
def create_meta(prefix, dist, info_dir, extra_info):
    """
    Create the conda metadata, in a given prefix, for a given package.
    """
    # read info/index.json first
    with open(join(info_dir, 'index.json')) as index_file:
        meta = json.load(index_file)
    # add extra info
    meta.update(extra_info)
    # write into <env>/conda-meta/<dist>.json
    meta_dir = join(prefix, 'conda-meta')
    if not isdir(meta_dir):
        os.makedirs(meta_dir)
    with open(join(meta_dir, dist + '.json'), 'w') as out_file:
        json.dump(meta, out_file, indent=2, sort_keys=True)
def mk_menus(prefix, files, remove=False):
    """
    Create cross-platform menu items (e.g. Windows Start Menu)
    Passes all menu config files %PREFIX%/Menu/*.json to ``menuinst.install``.
    ``remove=True`` will remove the menu items.
    """
    menu_files = [f for f in files
                  if f.lower().startswith('menu/')
                  and f.lower().endswith('.json')]
    if not menu_files:
        return
    elif basename(abspath(prefix)).startswith('_'):
        # underscore-prefixed envs are treated as private/internal
        logging.warn("Environment name starts with underscore '_'.  "
                     "Skipping menu installation.")
        return
    try:
        import menuinst
    except:
        # menu creation is best-effort; never fail the install over it
        logging.warn("Menuinst could not be imported:")
        logging.warn(traceback.format_exc())
        return
    # None means we are installing into the root environment
    env_name = (None if abspath(prefix) == abspath(sys.prefix) else
                basename(prefix))
    env_setup_cmd = ("activate %s" % env_name) if env_name else None
    for f in menu_files:
        try:
            # menuinst 1.0.x has the older 3-argument signature
            if menuinst.__version__.startswith('1.0'):
                menuinst.install(join(prefix, f), remove, prefix)
            else:
                menuinst.install(join(prefix, f), remove,
                                 root_prefix=sys.prefix,
                                 target_prefix=prefix, env_name=env_name,
                                 env_setup_cmd=env_setup_cmd)
        except:
            # best-effort: log and continue with remaining menu files
            stdoutlog.error("menuinst Exception:")
            stdoutlog.error(traceback.format_exc())
def run_script(prefix, dist, action='post-link', env_prefix=None):
    """
    call the post-link (or pre-unlink) script, and return True on success,
    False on failure
    """
    path = join(prefix, 'Scripts' if on_win else 'bin', '.%s-%s.%s' % (
            name_dist(dist),
            action,
            'bat' if on_win else 'sh'))
    if not isfile(path):
        return True  # nothing to run counts as success
    if on_win:
        try:
            args = [os.environ['COMSPEC'], '/c', path]
        except KeyError:
            return False
    else:
        args = ['/bin/bash', path]
    # Copy the environment instead of aliasing it: the original assignment
    # (env = os.environ) leaked PREFIX/PKG_* variables into this process.
    env = os.environ.copy()
    env['PREFIX'] = str(env_prefix or prefix)
    env['PKG_NAME'], env['PKG_VERSION'], env['PKG_BUILDNUM'] = \
        str(dist).rsplit('-', 2)
    if action == 'pre-link':
        env['SOURCE_DIR'] = str(prefix)
    try:
        subprocess.check_call(args, env=env)
    except subprocess.CalledProcessError:
        return False
    return True
def read_url(pkgs_dir, dist):
    """Return the most recent download URL recorded for ``dist`` in urls.txt, or None.

    The file handle is closed deterministically (the original ``open(...).read()``
    leaked it until GC).
    """
    try:
        with open(join(pkgs_dir, 'urls.txt')) as fi:
            urls = fi.read().split()
    except IOError:
        return None
    # scan newest-first: the last matching entry wins
    for url in urls[::-1]:
        if url.endswith('/%s.tar.bz2' % dist):
            return url
    return None
def read_icondata(source_dir):
    """Return info/icon.png as a base64 string, or None if the file is missing.

    The file handle is closed deterministically (the original ``open(...).read()``
    leaked it until GC).
    """
    import base64
    try:
        with open(join(source_dir, 'info', 'icon.png'), 'rb') as fi:
            data = fi.read()
    except IOError:
        return None
    return base64.b64encode(data).decode('utf-8')
def read_no_link(info_dir):
    """Union of entries from info/no_link and info/no_softlink (missing files ignored)."""
    entries = set()
    for fname in ('no_link', 'no_softlink'):
        try:
            entries.update(yield_lines(join(info_dir, fname)))
        except IOError:
            pass
    return entries
# Should this be an API function?
def symlink_conda(prefix, root_dir):
root_conda = join(root_dir, 'bin', 'conda')
root_activate = join(root_dir, 'bin', 'activate')
root_deactivate = join(root_dir, 'bin', 'deactivate')
prefix_conda = join(prefix, 'bin', 'conda')
prefix_activate = join(prefix, 'bin', 'activate')
prefix_deactivate = join(prefix, 'bin', 'deactivate')
if not os.path.lexists(join(prefix, 'bin')):
os.makedirs(join(prefix, 'bin'))
| |
hasattr( visitor, "visitLexerAtomCharSet" ):
return visitor.visitLexerAtomCharSet(self)
else:
return visitor.visitChildren(self)
class LexerAtomWildcardContext(LexerAtomContext):
    """Labeled alternative of 'lexerAtom': the '.' wildcard, optionally with element options."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def DOT(self):
        return self.getToken(ANTLRv4Parser.DOT, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterLexerAtomWildcard"):
            return
        listener.enterLexerAtomWildcard(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitLexerAtomWildcard"):
            return
        listener.exitLexerAtomWildcard(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitLexerAtomWildcard"):
            return visitor.visitChildren(self)
        return visitor.visitLexerAtomWildcard(self)
class LexerAtomTerminalContext(LexerAtomContext):
    """Labeled alternative of 'lexerAtom': a terminal (token ref or string literal)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def terminal(self):
        return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterLexerAtomTerminal"):
            return
        listener.enterLexerAtomTerminal(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitLexerAtomTerminal"):
            return
        listener.exitLexerAtomTerminal(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitLexerAtomTerminal"):
            return visitor.visitChildren(self)
        return visitor.visitLexerAtomTerminal(self)
class LexerAtomDocContext(LexerAtomContext):
    """Labeled alternative of 'lexerAtom': a DOC_COMMENT token (stored in .value)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def DOC_COMMENT(self):
        return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterLexerAtomDoc"):
            return
        listener.enterLexerAtomDoc(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitLexerAtomDoc"):
            return
        listener.exitLexerAtomDoc(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitLexerAtomDoc"):
            return visitor.visitChildren(self)
        return visitor.visitLexerAtomDoc(self)
def lexerAtom(self):
    """Parse rule 'lexerAtom' (rule index 98).

    Alternatives: characterRange | terminal | notSet | LEXER_CHAR_SET |
    '.' elementOptions? | DOC_COMMENT.  Returns the matched context,
    rebound to the labeled-alternative subclass that was chosen.
    """
    localctx = ANTLRv4Parser.LexerAtomContext(self, self._ctx, self.state)
    self.enterRule(localctx, 98, self.RULE_lexerAtom)
    self._la = 0 # Token type
    try:
        self.state = 531
        self._errHandler.sync(self)
        # Decision 66: adaptive prediction selects one of six alternatives.
        la_ = self._interp.adaptivePredict(self._input,66,self._ctx)
        if la_ == 1:
            # characterRange alternative.
            localctx = ANTLRv4Parser.LexerAtomRangeContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 522
            self.characterRange()
            pass
        elif la_ == 2:
            # terminal alternative.
            localctx = ANTLRv4Parser.LexerAtomTerminalContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 523
            self.terminal()
            pass
        elif la_ == 3:
            # '~...' negated set alternative.
            localctx = ANTLRv4Parser.LexerAtomNotContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 524
            self.notSet()
            pass
        elif la_ == 4:
            # LEXER_CHAR_SET token alternative; matched token kept in .value.
            localctx = ANTLRv4Parser.LexerAtomCharSetContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 525
            localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
            pass
        elif la_ == 5:
            # '.' wildcard, with optional '<...>' element options.
            localctx = ANTLRv4Parser.LexerAtomWildcardContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 526
            self.match(ANTLRv4Parser.DOT)
            self.state = 528
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.LT:
                self.state = 527
                self.elementOptions()
            pass
        elif la_ == 6:
            # DOC_COMMENT alternative; matched token kept in .value.
            localctx = ANTLRv4Parser.LexerAtomDocContext(self, localctx)
            self.enterOuterAlt(localctx, 6)
            self.state = 530
            localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
            pass
    except RecognitionException as re:
        # Standard ANTLR error handling: record, report, recover.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AtomContext(ParserRuleContext):
    """Base context for the 'atom' parser rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_atom

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class AtomTerminalContext(AtomContext):
    """Labeled alternative of 'atom': a terminal (token ref or string literal)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def terminal(self):
        return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterAtomTerminal"):
            return
        listener.enterAtomTerminal(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitAtomTerminal"):
            return
        listener.exitAtomTerminal(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitAtomTerminal"):
            return visitor.visitChildren(self)
        return visitor.visitAtomTerminal(self)
class AtomWildcardContext(AtomContext):
    """Labeled alternative of 'atom': the '.' wildcard, optionally with element options."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def DOT(self):
        return self.getToken(ANTLRv4Parser.DOT, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterAtomWildcard"):
            return
        listener.enterAtomWildcard(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitAtomWildcard"):
            return
        listener.exitAtomWildcard(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitAtomWildcard"):
            return visitor.visitChildren(self)
        return visitor.visitAtomWildcard(self)
class AtomRuleRefContext(AtomContext):
    """Labeled alternative of 'atom': a reference to another parser rule."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def ruleref(self):
        return self.getTypedRuleContext(ANTLRv4Parser.RulerefContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterAtomRuleRef"):
            return
        listener.enterAtomRuleRef(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitAtomRuleRef"):
            return
        listener.exitAtomRuleRef(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitAtomRuleRef"):
            return visitor.visitChildren(self)
        return visitor.visitAtomRuleRef(self)
class AtomNotContext(AtomContext):
    """Labeled alternative of 'atom': a '~' negated set."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def notSet(self):
        return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterAtomNot"):
            return
        listener.enterAtomNot(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitAtomNot"):
            return
        listener.exitAtomNot(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitAtomNot"):
            return visitor.visitChildren(self)
        return visitor.visitAtomNot(self)
def atom(self):
    """Parse rule 'atom' (rule index 100).

    Alternatives, dispatched LL(1) on the next token:
    terminal | ruleref | notSet | '.' elementOptions?.
    Any other lookahead raises NoViableAltException into the error handler.
    """
    localctx = ANTLRv4Parser.AtomContext(self, self._ctx, self.state)
    self.enterRule(localctx, 100, self.RULE_atom)
    self._la = 0 # Token type
    try:
        self.state = 540
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.STRING_LITERAL]:
            # Token reference or string literal -> terminal atom.
            localctx = ANTLRv4Parser.AtomTerminalContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 533
            self.terminal()
            pass
        elif token in [ANTLRv4Parser.RULE_REF]:
            # Reference to another parser rule.
            localctx = ANTLRv4Parser.AtomRuleRefContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 534
            self.ruleref()
            pass
        elif token in [ANTLRv4Parser.NOT]:
            # '~' negated set.
            localctx = ANTLRv4Parser.AtomNotContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 535
            self.notSet()
            pass
        elif token in [ANTLRv4Parser.DOT]:
            # '.' wildcard with optional '<...>' element options.
            localctx = ANTLRv4Parser.AtomWildcardContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 536
            self.match(ANTLRv4Parser.DOT)
            self.state = 538
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.LT:
                self.state = 537
                self.elementOptions()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR error handling: record, report, recover.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NotSetContext(ParserRuleContext):
    """Base context for the 'notSet' parser rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_notSet

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class NotBlockContext(NotSetContext):
    """Labeled alternative of 'notSet': '~' followed by a parenthesized block set."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
        super().__init__(parser)
        self.value = None # BlockSetContext
        self.copyFrom(ctx)

    def NOT(self):
        return self.getToken(ANTLRv4Parser.NOT, 0)

    def blockSet(self):
        return self.getTypedRuleContext(ANTLRv4Parser.BlockSetContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterNotBlock"):
            return
        listener.enterNotBlock(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitNotBlock"):
            return
        listener.exitNotBlock(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitNotBlock"):
            return visitor.visitChildren(self)
        return visitor.visitNotBlock(self)
class NotElementContext(NotSetContext):
    """Labeled alternative of 'notSet': '~' followed by a single set element."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
        super().__init__(parser)
        self.value = None # SetElementContext
        self.copyFrom(ctx)

    def NOT(self):
        return self.getToken(ANTLRv4Parser.NOT, 0)

    def setElement(self):
        return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterNotElement"):
            return
        listener.enterNotElement(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitNotElement"):
            return
        listener.exitNotElement(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitNotElement"):
            return visitor.visitChildren(self)
        return visitor.visitNotElement(self)
def notSet(self):
    """Parse rule 'notSet' (rule index 102): '~' setElement | '~' blockSet."""
    localctx = ANTLRv4Parser.NotSetContext(self, self._ctx, self.state)
    self.enterRule(localctx, 102, self.RULE_notSet)
    try:
        self.state = 546
        self._errHandler.sync(self)
        # Decision 69: distinguish '~elem' from '~( ... )'.
        la_ = self._interp.adaptivePredict(self._input,69,self._ctx)
        if la_ == 1:
            # '~' single set element; result stored in .value.
            localctx = ANTLRv4Parser.NotElementContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 542
            self.match(ANTLRv4Parser.NOT)
            self.state = 543
            localctx.value = self.setElement()
            pass
        elif la_ == 2:
            # '~' parenthesized block set; result stored in .value.
            localctx = ANTLRv4Parser.NotBlockContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 544
            self.match(ANTLRv4Parser.NOT)
            self.state = 545
            localctx.value = self.blockSet()
            pass
    except RecognitionException as re:
        # Standard ANTLR error handling: record, report, recover.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BlockSetContext(ParserRuleContext):
    """Context for rule 'blockSet': '(' setElement ('|' setElement)* ')'.

    All parsed setElement children are also collected in .elements.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._setElement = None # SetElementContext
        self.elements = list() # of SetElementContexts

    def LPAREN(self):
        return self.getToken(ANTLRv4Parser.LPAREN, 0)

    def RPAREN(self):
        return self.getToken(ANTLRv4Parser.RPAREN, 0)

    def setElement(self, i:int=None):
        # i is None -> every setElement child; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.SetElementContext)
        return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext, i)

    def OR(self, i:int=None):
        # i is None -> every '|' token; otherwise the i-th one.
        if i is None:
            return self.getTokens(ANTLRv4Parser.OR)
        return self.getToken(ANTLRv4Parser.OR, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_blockSet

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterBlockSet"):
            return
        listener.enterBlockSet(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitBlockSet"):
            return
        listener.exitBlockSet(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitBlockSet"):
            return visitor.visitChildren(self)
        return visitor.visitBlockSet(self)
def blockSet(self):
    """Parse rule 'blockSet' (rule index 104): '(' setElement ('|' setElement)* ')'.

    Every parsed setElement is appended to localctx.elements.
    """
    localctx = ANTLRv4Parser.BlockSetContext(self, self._ctx, self.state)
    self.enterRule(localctx, 104, self.RULE_blockSet)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 548
        self.match(ANTLRv4Parser.LPAREN)
        self.state = 549
        localctx._setElement = self.setElement()
        localctx.elements.append(localctx._setElement)
        self.state = 554
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more '|' setElement continuations.
        while _la==ANTLRv4Parser.OR:
            self.state = 550
            self.match(ANTLRv4Parser.OR)
            self.state = 551
            localctx._setElement = self.setElement()
            localctx.elements.append(localctx._setElement)
            self.state = 556
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 557
        self.match(ANTLRv4Parser.RPAREN)
    except RecognitionException as re:
        # Standard ANTLR error handling: record, report, recover.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SetElementContext(ParserRuleContext):
    """Base context for the 'setElement' parser rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_setElement

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class SetElementRefContext(SetElementContext):
    """Labeled alternative of 'setElement': a TOKEN_REF (stored in .value), optionally with element options."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def TOKEN_REF(self):
        return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterSetElementRef"):
            return
        listener.enterSetElementRef(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitSetElementRef"):
            return
        listener.exitSetElementRef(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitSetElementRef"):
            return visitor.visitChildren(self)
        return visitor.visitSetElementRef(self)
class SetElementRangeContext(SetElementContext):
    """Labeled alternative of 'setElement': a character range ('a'..'z')."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def characterRange(self):
        return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterSetElementRange"):
            return
        listener.enterSetElementRange(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitSetElementRange"):
            return
        listener.exitSetElementRange(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitSetElementRange"):
            return visitor.visitChildren(self)
        return visitor.visitSetElementRange(self)
class SetElementLitContext(SetElementContext):
    """Labeled alternative of 'setElement': a STRING_LITERAL (stored in .value), optionally with element options."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def STRING_LITERAL(self):
        return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterSetElementLit"):
            return
        listener.enterSetElementLit(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitSetElementLit"):
            return
        listener.exitSetElementLit(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitSetElementLit"):
            return visitor.visitChildren(self)
        return visitor.visitSetElementLit(self)
class SetElementCharSetContext(SetElementContext):
    """Labeled alternative of 'setElement': a LEXER_CHAR_SET token (stored in .value)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def LEXER_CHAR_SET(self):
        return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterSetElementCharSet"):
            return
        listener.enterSetElementCharSet(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitSetElementCharSet"):
            return
        listener.exitSetElementCharSet(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitSetElementCharSet"):
            return visitor.visitChildren(self)
        return visitor.visitSetElementCharSet(self)
def setElement(self):
localctx = ANTLRv4Parser.SetElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_setElement)
self._la = 0 # Token type
try:
self.state = 569
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,73,self._ctx)
if la_ == 1:
localctx | |
:type P1: gp_Parab2d
:param D1:
:type D1: IntRes2d_Domain &
:param P2:
:type P2: gp_Parab2d
:param D2:
:type D2: IntRes2d_Domain &
:param TolConf:
:type TolConf: float
:param Tol:
:type Tol: float
:rtype: None
* Intersection between a parabola and an hyperbola.
:param P:
:type P: gp_Parab2d
:param DP:
:type DP: IntRes2d_Domain &
:param H:
:type H: gp_Hypr2d
:param DH:
:type DH: IntRes2d_Domain &
:param TolConf:
:type TolConf: float
:param Tol:
:type Tol: float
:rtype: None
* Intersection between 2 hyperbolas.
:param H1:
:type H1: gp_Hypr2d
:param D1:
:type D1: IntRes2d_Domain &
:param H2:
:type H2: gp_Hypr2d
:param D2:
:type D2: IntRes2d_Domain &
:param TolConf:
:type TolConf: float
:param Tol:
:type Tol: float
:rtype: None
"""
_IntCurve.IntCurve_IntConicConic_swiginit(self,_IntCurve.new_IntCurve_IntConicConic(*args))
def Perform(self, *args):
    """Compute the 2D intersection of two conics from gp.

    All overloads return None; results are stored on this
    IntRes2d_Intersection.  Each takes (curve1, domain1, curve2,
    domain2, TolConf: float, Tol: float), where each domain is an
    IntRes2d_Domain.  Supported curve pairs:

    * line/line: (gp_Lin2d, D1, gp_Lin2d, D2, TolConf, Tol)
    * line/circle: (gp_Lin2d, DL, gp_Circ2d, DC, ...) -- ConstructionError
      if the circle domain's IsClosed() is False
    * line/ellipse: (gp_Lin2d, DL, gp_Elips2d, DE, ...) -- ConstructionError
      if the ellipse domain is not closed
    * line/parabola: (gp_Lin2d, DL, gp_Parab2d, DP, ...)
    * line/hyperbola: (gp_Lin2d, DL, gp_Hypr2d, DH, ...)
    * circle/circle: (gp_Circ2d, D1, gp_Circ2d, D2, ...) -- ConstructionError
      if either circle domain is not closed
    * circle/ellipse: (gp_Circ2d, DC, gp_Elips2d, DE, ...) -- ConstructionError
      if either domain is not closed
    * circle/parabola: (gp_Circ2d, DC, gp_Parab2d, DP, ...) -- ConstructionError
      if the circle domain is not closed
    * circle/hyperbola: (gp_Circ2d, DC, gp_Hypr2d, DH, ...) -- ConstructionError
      if the circle domain is not closed
    * ellipse/ellipse: (gp_Elips2d, D1, gp_Elips2d, D2, ...) -- ConstructionError
      if either domain is not closed
    * ellipse/parabola: (gp_Elips2d, DE, gp_Parab2d, DP, ...) -- ConstructionError
      if the ellipse domain is not closed
    * ellipse/hyperbola: (gp_Elips2d, DE, gp_Hypr2d, DH, ...) -- ConstructionError
      if the ellipse domain is not closed
    * parabola/parabola: (gp_Parab2d, D1, gp_Parab2d, D2, ...)
    * parabola/hyperbola: (gp_Parab2d, DP, gp_Hypr2d, DH, ...)
    * hyperbola/hyperbola: (gp_Hypr2d, D1, gp_Hypr2d, D2, ...)
    """
    # Thin SWIG delegation; overload resolution happens in the C++ layer.
    return _IntCurve.IntCurve_IntConicConic_Perform(self, *args)
def __del__(self):
    """Hand ownership of the wrapped C++ object to the delayed garbage collector."""
    try:
        self.thisown = False
        GarbageCollector.garbage.collect_object(self)
    except Exception:
        # Best-effort: during interpreter shutdown module globals such as
        # GarbageCollector may already be torn down.  Narrowed from a bare
        # 'except:' so SystemExit/KeyboardInterrupt are not swallowed.
        pass
# Rebind the flat SWIG functions as bound instance methods and register the
# proxy class with the SWIG runtime so C++ results wrap back into it.
IntCurve_IntConicConic.Perform = new_instancemethod(_IntCurve.IntCurve_IntConicConic_Perform,None,IntCurve_IntConicConic)
IntCurve_IntConicConic._kill_pointed = new_instancemethod(_IntCurve.IntCurve_IntConicConic__kill_pointed,None,IntCurve_IntConicConic)
IntCurve_IntConicConic_swigregister = _IntCurve.IntCurve_IntConicConic_swigregister
IntCurve_IntConicConic_swigregister(IntCurve_IntConicConic)
class IntCurve_IntImpConicParConic(OCC.IntRes2d.IntRes2d_Intersection):
    """SWIG proxy for the implicit-conic / parametric-conic 2D intersector."""

    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """Create the intersector.

        Overloads:
        * __init__() -> None
        * __init__(ITool: IntCurve_IConicTool, Dom1: IntRes2d_Domain,
          PCurve: IntCurve_PConic, Dom2: IntRes2d_Domain,
          TolConf: float, Tol: float) -> None
        """
        _IntCurve.IntCurve_IntImpConicParConic_swiginit(self,_IntCurve.new_IntCurve_IntImpConicParConic(*args))

    def Perform(self, *args):
        """Perform(ITool: IntCurve_IConicTool, Dom1: IntRes2d_Domain,
        PCurve: IntCurve_PConic, Dom2: IntRes2d_Domain,
        TolConf: float, Tol: float) -> None
        """
        return _IntCurve.IntCurve_IntImpConicParConic_Perform(self, *args)

    def FindU(self, *args):
        """FindU(parameter: float, point: gp_Pnt2d,
        TheParCurev: IntCurve_PConic,
        TheImpTool: IntCurve_IConicTool) -> float
        """
        return _IntCurve.IntCurve_IntImpConicParConic_FindU(self, *args)

    def FindV(self, *args):
        """FindV(parameter: float, point: gp_Pnt2d,
        TheImpTool: IntCurve_IConicTool, ParCurve: IntCurve_PConic,
        TheParCurveDomain: IntRes2d_Domain, V0: float, V1: float,
        Tolerance: float) -> float
        """
        return _IntCurve.IntCurve_IntImpConicParConic_FindV(self, *args)

    def And_Domaine_Objet1_Intersections(self, *args):
        """And_Domaine_Objet1_Intersections(TheImpTool: IntCurve_IConicTool,
        TheParCurve: IntCurve_PConic, TheImpCurveDomain: IntRes2d_Domain,
        TheParCurveDomain: IntRes2d_Domain, NbResultats: int,
        Inter2_And_Domain2: TColStd_Array1OfReal,
        Inter1: TColStd_Array1OfReal, Resultat1: TColStd_Array1OfReal,
        Resultat2: TColStd_Array1OfReal, EpsNul: float) -> None
        """
        return _IntCurve.IntCurve_IntImpConicParConic_And_Domaine_Objet1_Intersections(self, *args)

    def __del__(self):
        # Best-effort hand-off to the delayed garbage collector; bare except
        # is kept because globals may be gone at interpreter shutdown.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
# Rebind the flat SWIG functions as bound instance methods and register the
# proxy class with the SWIG runtime so C++ results wrap back into it.
IntCurve_IntImpConicParConic.Perform = new_instancemethod(_IntCurve.IntCurve_IntImpConicParConic_Perform,None,IntCurve_IntImpConicParConic)
IntCurve_IntImpConicParConic.FindU = new_instancemethod(_IntCurve.IntCurve_IntImpConicParConic_FindU,None,IntCurve_IntImpConicParConic)
IntCurve_IntImpConicParConic.FindV = new_instancemethod(_IntCurve.IntCurve_IntImpConicParConic_FindV,None,IntCurve_IntImpConicParConic)
IntCurve_IntImpConicParConic.And_Domaine_Objet1_Intersections = new_instancemethod(_IntCurve.IntCurve_IntImpConicParConic_And_Domaine_Objet1_Intersections,None,IntCurve_IntImpConicParConic)
IntCurve_IntImpConicParConic._kill_pointed = new_instancemethod(_IntCurve.IntCurve_IntImpConicParConic__kill_pointed,None,IntCurve_IntImpConicParConic)
IntCurve_IntImpConicParConic_swigregister = _IntCurve.IntCurve_IntImpConicParConic_swigregister
IntCurve_IntImpConicParConic_swigregister(IntCurve_IntImpConicParConic)
class IntCurve_MyImpParToolOfIntImpConicParConic(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param IT:
:type IT: IntCurve_IConicTool &
:param PC:
:type PC: IntCurve_PConic &
:rtype: None
"""
_IntCurve.IntCurve_MyImpParToolOfIntImpConicParConic_swiginit(self,_IntCurve.new_IntCurve_MyImpParToolOfIntImpConicParConic(*args))
def Value(self, *args):
"""
:param Param:
:type Param: float
:param F:
:type F: float &
:rtype: bool
"""
return _IntCurve.IntCurve_MyImpParToolOfIntImpConicParConic_Value(self, *args)
def Derivative(self, *args):
"""
:param Param:
:type Param: float
:param D:
:type D: float &
:rtype: bool
"""
return _IntCurve.IntCurve_MyImpParToolOfIntImpConicParConic_Derivative(self, *args)
def Values(self, *args):
"""
:param Param:
:type Param: float
:param | |
self.shape_info = {'dims': self.state.dims,
'coords': self.state.coords}
else:
self.state = np.array([init_state_value] * self.order)
def __call__(self):
    """Return the current output: the last stage of the smoothing cascade.

    When the state is an xarray object (shape_info set), the internal
    '_smooth' coordinate is stripped before returning.
    """
    if not self.shape_info:
        return self.state[-1]
    return self.state[-1].reset_coords('_smooth', drop=True)
def ddt(self):
    """Time derivative of the cascade: each stage relaxes towards the one before it."""
    targets = np.roll(self.state, 1, axis=0)
    new_input = self.input_func()
    # With xarray state (shape_info set) the raw ndarray values are needed.
    targets[0] = new_input.values if self.shape_info else new_input
    return (targets - self.state) * self.order / self.smooth_time_func()
def export(self):
    """Serialize this element for model saving: state array plus xarray shape info."""
    return {self.py_name: {'state': self.state,
                           'shape_info': self.shape_info}}
class Trend(DynamicStateful):
    """
    Implements the TREND function: the fractional growth rate of
    trend_input, measured over average_time.
    """

    def __init__(self, trend_input, average_time, initial_trend,
                 py_name="Trend object"):
        """
        Parameters
        ----------
        trend_input: function
            Input of the trend.
        average_time: function
            Averaging time for the input.
        initial_trend: function
            Initial fractional trend value.
        py_name: str
            Python name to identify the object.
        """
        super().__init__()
        self.init_func = initial_trend
        self.average_time_function = average_time
        self.input_func = trend_input
        self.py_name = py_name

    def initialize(self, init_val=None):
        """Set the averaged-input state so the starting trend equals the initial value."""
        current_input = self.input_func()
        trend0 = self.init_func() if init_val is None else init_val
        self.state = current_input / (1 + trend0*self.average_time_function())
        if isinstance(self.state, xr.DataArray):
            self.shape_info = {'dims': self.state.dims,
                               'coords': self.state.coords}

    def __call__(self):
        """Current trend: relative gap between the input and its average (0 when the average is 0)."""
        gap = self.input_func() - self.state
        return zidz(gap, self.average_time_function() * np.abs(self.state))

    def ddt(self):
        """Exponentially average the input into the state."""
        return (self.input_func() - self.state) / self.average_time_function()

    def export(self):
        """Serialize this element for model saving."""
        return {self.py_name: {
            'state': self.state,
            'shape_info': self.shape_info}}
class SampleIfTrue(DynamicStateful):
    """
    Implements SAMPLE IF TRUE: holds the last value sampled while the
    condition was true.
    """

    def __init__(self, condition, actual_value, initial_value,
                 py_name="SampleIfTrue object"):
        """
        Parameters
        ----------
        condition: function
            Boolean sampling condition.
        actual_value: function
            Value sampled whenever the condition holds.
        initial_value: function
            Value held before the first sample.
        py_name: str
            Python name to identify the object.
        """
        super().__init__()
        self.condition = condition
        self.actual_value = actual_value
        self.init_func = initial_value
        self.py_name = py_name

    def initialize(self, init_val=None):
        """Reset the held value (to initial_value unless an override is given)."""
        self.state = self.init_func() if init_val is None else init_val
        if isinstance(self.state, xr.DataArray):
            self.shape_info = {'dims': self.state.dims,
                               'coords': self.state.coords}

    def __call__(self):
        """Return actual_value while the condition holds, else the held state."""
        return if_then_else(self.condition(),
                            self.actual_value,
                            lambda: self.state)

    def ddt(self):
        # Not integrated as a flow; the state changes discretely in update().
        return np.nan

    def update(self, state):
        """Resample discretely; adding to a zeroed state preserves array shape/coords."""
        self.state = self.state*0 + if_then_else(self.condition(),
                                                 self.actual_value,
                                                 lambda: self.state)

    def export(self):
        """Serialize this element for model saving."""
        return {self.py_name: {
            'state': self.state,
            'shape_info': self.shape_info}}
class Initial(Stateful):
    """
    Implements the INITIAL function: evaluates once at initialization
    time and keeps that value for the whole run.
    """

    def __init__(self, initial_value, py_name="Initial object"):
        """
        Parameters
        ----------
        initial_value: function
            Expression evaluated at initialization time.
        py_name: str
            Python name to identify the object.
        """
        super().__init__()
        self.init_func = initial_value
        self.py_name = py_name

    def initialize(self, init_val=None):
        """Capture the initial value (or an explicit override)."""
        self.state = self.init_func() if init_val is None else init_val

    def export(self):
        """Serialize this element for model saving."""
        return {self.py_name: {'state': self.state}}
class Macro(DynamicStateful):
"""
The Model class implements a stateful representation of the system,
and contains the majority of methods for accessing and modifying model
components.
When the instance in question also serves as the root model object
(as opposed to a macro or submodel within another model) it will have
added methods to facilitate execution.
"""
    def __init__(self, py_model_file, params=None, return_func=None,
                 time=None, time_initialization=None, py_name=None):
        """
        The model object will be created with components drawn from a
        translated python model file.

        Parameters
        ----------
        py_model_file : str
            Filename of a model which has already been converted into a
            python format.
        params : dict, optional
            Component values to override at construction time
            (forwarded to ``set_components``).
        return_func : str, optional
            Name of the component whose value ``__call__`` returns;
            when omitted, calling the macro returns 0.
        time : optional
            Needs to be a function that returns a time object.
        time_initialization : optional
            Callable that builds the time object when ``time`` is None
            (used lazily by ``initialize``).
        py_name : str, optional
            Python name to identify the object.
        """
        super().__init__()
        self.time = time
        self.time_initialization = time_initialization
        self.py_name = py_name
        # Filled in by initialize(); remembers a working init order (#247).
        self.initialize_order = None
        # need a unique identifier for the imported module: the random
        # suffix avoids module-name collisions when the same model file
        # is loaded more than once in a session.
        module_name = os.path.splitext(py_model_file)[0]\
            + str(random.randint(0, 1000000))
        try:
            self.components = SourceFileLoader(module_name,
                                               py_model_file).load_module()
        except TypeError:
            # Translated files from old PySD versions fail to load here.
            raise ImportError(
                "\n\nNot able to import the model. "
                + "This may be because the model was compiled with an "
                + "earlier version of PySD, you can check on the top of "
                + " the model file you are trying to load."
                + "\nThe current version of PySd is :"
                + "\n\tPySD " + __version__ + "\n\n"
                + "Please translate again the model with the function"
                + " read_vensim or read_xmile.")
        # Only the *major* version of the compiler must match the runtime.
        if __version__.split(".")[0]\
           != self.get_pysd_compiler_version().split(".")[0]:
            raise ImportError(
                "\n\nNot able to import the model. "
                + "The model was compiled with a "
                + "not compatible version of PySD:"
                + "\n\tPySD " + self.get_pysd_compiler_version()
                + "\n\nThe current version of PySd is:"
                + "\n\tPySD " + __version__ + "\n\n"
                + "Please translate again the model with the function"
                + " read_vensim or read_xmile.")
        if params is not None:
            self.set_components(params)
        # Get the collections of stateful elements and external elements
        self._stateful_elements = [
            getattr(self.components, name) for name in dir(self.components)
            if isinstance(getattr(self.components, name), Stateful)
        ]
        self._dynamicstateful_elements = [
            getattr(self.components, name) for name in dir(self.components)
            if isinstance(getattr(self.components, name), DynamicStateful)
        ]
        self._external_elements = [
            getattr(self.components, name) for name in dir(self.components)
            if isinstance(getattr(self.components, name), External)
        ]
        if return_func is not None:
            self.return_func = getattr(self.components, return_func)
        else:
            # With no return function, calling the macro yields a constant 0.
            self.return_func = lambda: 0
        self.py_model_file = py_model_file
    def __call__(self):
        """Evaluate the macro's return expression (0 if none was given)."""
        return self.return_func()
    def get_pysd_compiler_version(self):
        """
        Returns the version of the PySD compiler that was used to
        generate this model (read from the translated module).
        """
        return self.components.__pysd_version__
    def initialize(self, initialization_order=None):
        """
        This function tries to initialize the stateful objects.
        In the case where an initialization function for `Stock A` depends on
        the value of `Stock B`, if we try to initialize `Stock A` before
        `Stock B` then we will get an error, as the value will not yet exist.
        In this case, just skip initializing `Stock A` for now, and
        go on to the other state initializations. Then come back to it and
        try again.

        Two phases: first, replay a previously discovered working order
        (fast path, see issue #247); if that fails for any reason, fall
        back to trial-and-error discovery of a new order.
        """
        # Initialize time
        if self.time is None:
            self.time = self.time_initialization()
        self.components.cache.clean()
        self.components.cache.time = self.time()
        self.components._init_outer_references({
            'scope': self,
            'time': self.time
        })
        # Initialize external elements
        for element in self._external_elements:
            element.initialize()
        Excels.clean()
        remaining = set(self._stateful_elements)
        # Fast path only applies when element names are unique AND a
        # previous run already recorded a working order.
        if len(set([element.py_name for element in self._stateful_elements]))\
           == len(set(self._stateful_elements)) and self.initialize_order:
            # use elements names to initialize them, this is available
            # after the model is initialized one time
            # solves issue #247 until we have a dependency dictionary
            try:
                for element_name in self.initialize_order:
                    for element in remaining:
                        if element.py_name == element_name:
                            element.initialize()
                            break
                    # NOTE(review): if no element matched element_name this
                    # removes whatever element was iterated last (or raises
                    # NameError/KeyError on an empty set); the except below
                    # then falls back to order discovery — confirm intended.
                    remaining.remove(element)
                assert len(remaining) == 0
                return
            except Exception as err:
                # if user includes new stateful objects or some other
                # dependencies the previous initialization order may
                # not be kept
                warnings.warn(
                    err.args[0] +
                    "\n\nNot able to initialize statefull elements "
                    "with the same order as before..."
                    "Trying to find a new order.")
        # initialize as always, recording the order that succeeds
        self.initialize_order = []
        # Initialize stateful elements
        remaining = set(self._stateful_elements)
        while remaining:
            progress = set()
            for element in remaining:
                try:
                    element.initialize()
                    progress.add(element)
                    self.initialize_order.append(element.py_name)
                except (KeyError, TypeError, AttributeError):
                    # Dependency not ready yet; retry on a later pass.
                    pass
            if progress:
                remaining.difference_update(progress)
            else:
                # A full pass made no progress: circular dependency.
                raise ValueError('Unresolvable Reference: '
                                 + 'Probable circular initialization...\n'
                                 + 'Not able to initialize the '
                                 + 'following objects:\n\t'
                                 + '\n\t'.join([e.py_name for e in remaining]))
def ddt(self):
return np.array([component.ddt() for component
in self._dynamicstateful_elements], dtype=object)
@property
def state(self):
return np.array([component.state for component
in self._dynamicstateful_elements], dtype=object)
@state.setter
def state(self, new_value):
[component.update(val) for component, val
in zip(self._dynamicstateful_elements, new_value)]
def export(self, file_name):
"""
Export stateful values to pickle file.
Parameters
----------
file_name: str
Name of the file to export the values.
"""
warnings.warn(
"\nCompatibility of exported states could be broken between"
" different versions of PySD or xarray, current versions:\n"
f"\tPySD {__version__}\n\txarray {xr.__version__}\n"
)
stateful_elements = {}
[stateful_elements.update(component.export()) for component
in self._stateful_elements]
with open(file_name, 'wb') as file:
pickle.dump(
(self.time(),
stateful_elements,
{'pysd': __version__, 'xarray': xr.__version__}
), file)
    def import_pickle(self, file_name):
        """
        Import stateful values from pickle file.

        Restores element states, rewinds the model clock to the saved
        simulation time, and resets the component cache accordingly.

        Parameters
        ----------
        file_name: str
            Name of the file to import the values from.
        """
        with open(file_name, 'rb') as file:
            time, stateful_dict, metadata = pickle.load(file)
        # Warn (but still proceed) when the pickle was produced by other
        # PySD/xarray versions, since the state layout may have changed.
        if __version__ != metadata['pysd']\
           or xr.__version__ != metadata['xarray']:
            warnings.warn(
                "\nCompatibility of exported states could be broken between"
                " different versions of PySD or xarray. Current versions:\n"
                f"\tPySD {__version__}\n\txarray {xr.__version__}\n"
                "Loaded versions:\n"
                f"\tPySD {metadata['pysd']}\n\txarray {metadata['xarray']}\n"
            )
        self.set_stateful(stateful_dict)
        # Rewind the model clock and caches to the saved simulation time.
        self.time.update(time)
        self.components.cache.reset(time)
def get_args(self, param):
"""
Returns the arguments of a model element.
Parameters
----------
param: str or func
The model element name or function.
Returns
-------
args: list
List of arguments of the function.
Examples
--------
>>> model.get_args('birth_rate')
>>> model.get_args('Birth Rate')
"""
if isinstance(param, str):
func_name = utils.get_value_by_insensitive_key_or_value(
param,
self.components._namespace) or param
if hasattr(self.components, func_name):
func = getattr(self.components, func_name)
else:
NameError(
"\n'%s' is not recognized as a model component."
% param)
else:
func = param
if hasattr(func, 'args'):
# cached functions
return func.args
else:
# regular functions
args = inspect.getfullargspec(func)[0]
if 'self' in args:
args.remove('self')
return args
def get_coords(self, param):
"""
Returns the coordinates and dims of a model element.
Parameters
----------
param: str or func
The model element name or function.
Returns
-------
(coords, dims) or None: (dict, list) or None
The coords and the dimensions of the element if it has.
Otherwise, returns None.
Examples
--------
>>> model.get_coords('birth_rate')
>>> model.get_coords('Birth Rate')
"""
if isinstance(param, | |
'''
Code for handling files that are band sequential (BSQ).
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import array
import logging
import numpy as np
import os
import sys
import spectral as spy
from ..utilities.python23 import typecode, tobytes, frombytes
from .spyfile import SpyFile, MemmapFile
byte_typecode = typecode('b')
class BsqFile(SpyFile, MemmapFile):
'''
A class to represent image files stored with bands sequential.
'''
    def __init__(self, params, metadata=None):
        """Create a BSQ file interface from `params`, opening a read-only
        memmap over the data when the platform allows it."""
        self.interleave = spy.BSQ
        if metadata is None:
            metadata = {}
        SpyFile.__init__(self, params, metadata)
        # May be None (file too large for the platform, or mapping failed);
        # the read_* methods all fall back to direct file access.
        self._memmap = self._open_memmap('r')
def _open_memmap(self, mode):
logger = logging.getLogger('spectral')
if (os.path.getsize(self.filename) < sys.maxsize):
try:
(R, C, B) = self.shape
return np.memmap(self.filename, dtype=self.dtype, mode=mode,
offset=self.offset, shape=(B, R, C))
except:
logger.debug('Unable to create memmap interface.')
return None
else:
return None
def read_band(self, band, use_memmap=True):
'''Reads a single band from the image.
Arguments:
`band` (int):
Index of band to read.
`use_memmap` (bool, default True):
Specifies whether the file's memmap interface should be used
to read the data. Setting this arg to True only has an effect
if a memmap is being used (i.e., if `img.using_memmap` is True).
Returns:
:class:`numpy.ndarray`
An `MxN` array of values for the specified band.
'''
if self._memmap is not None and use_memmap is True:
data = np.array(self._memmap[band, :, :])
if self.scale_factor != 1:
data = data / float(self.scale_factor)
return data
vals = array.array(byte_typecode)
offset = self.offset + band * self.sample_size * \
self.nrows * self.ncols
f = self.fid
# Pixel format is BSQ, so read the whole band at once.
f.seek(offset, 0)
vals.fromfile(f, self.nrows * self.ncols * self.sample_size)
arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
arr = arr.reshape(self.nrows, self.ncols)
if self.scale_factor != 1:
return arr / float(self.scale_factor)
return arr
    def read_bands(self, bands, use_memmap=False):
        '''Reads multiple bands from the image.
        Arguments:
            `bands` (list of ints):
                Indices of bands to read.
            `use_memmap` (bool, default False):
                Specifies whether the file's memmap interface should be used
                to read the data. Setting this arg to True only has an effect
                if a memmap is being used (i.e., if `img.using_memmap` is True).
        Returns:
           :class:`numpy.ndarray`
                An `MxNxL` array of values for the specified bands. `M` and `N`
                are the number of rows & columns in the image and `L` equals
                len(`bands`).
        '''
        if self._memmap is not None and use_memmap is True:
            # Fancy-index the (B, R, C) memmap, then move bands to axis 2.
            data = np.array(self._memmap[bands, :, :]).transpose((1, 2, 0))
            if self.scale_factor != 1:
                data = data / float(self.scale_factor)
            return data
        f = self.fid
        arr = np.zeros((self.nrows, self.ncols, len(bands)), dtype=self.dtype)
        for j in range(len(bands)):
            vals = array.array(byte_typecode)
            # Byte offset of band bands[j]; bands are stored contiguously.
            offset = self.offset + (bands[j]) * self.sample_size \
                * self.nrows * self.ncols
            # Pixel format is BSQ, so read an entire band at time.
            f.seek(offset, 0)
            vals.fromfile(f, self.nrows * self.ncols * self.sample_size)
            band = np.frombuffer(tobytes(vals), dtype=self.dtype)
            arr[:, :, j] = band.reshape(self.nrows, self.ncols)
        if self.scale_factor != 1:
            return arr / float(self.scale_factor)
        return arr
def read_pixel(self, row, col, use_memmap=True):
'''Reads the pixel at position (row,col) from the file.
Arguments:
`row`, `col` (int):
Indices of the row & column for the pixel
`use_memmap` (bool, default True):
Specifies whether the file's memmap interface should be used
to read the data. Setting this arg to True only has an effect
if a memmap is being used (i.e., if `img.using_memmap` is True).
Returns:
:class:`numpy.ndarray`
A length-`B` array, where `B` is the number of image bands.
'''
if self._memmap is not None and use_memmap is True:
data = np.array(self._memmap[:, row, col])
if self.scale_factor != 1:
data = data / float(self.scale_factor)
return data
vals = array.array(byte_typecode)
delta = self.sample_size * (self.nbands - 1)
offset = self.offset + row * self.nbands * self.ncols \
* self.sample_size + col * self.sample_size
f = self.fid
nPixels = self.nrows * self.ncols
ncols = self.ncols
sampleSize = self.sample_size
bandSize = sampleSize * nPixels
rowSize = sampleSize * self.ncols
for i in range(self.nbands):
f.seek(self.offset
+ i * bandSize
+ row * rowSize
+ col * sampleSize, 0)
vals.fromfile(f, sampleSize)
pixel = np.frombuffer(tobytes(vals), dtype=self.dtype)
if self.scale_factor != 1:
return pixel / float(self.scale_factor)
return pixel
def read_subregion(self, row_bounds, col_bounds, bands=None,
use_memmap=True):
'''
Reads a contiguous rectangular sub-region from the image.
Arguments:
`row_bounds` (2-tuple of ints):
(a, b) -> Rows a through b-1 will be read.
`col_bounds` (2-tuple of ints):
(a, b) -> Columnss a through b-1 will be read.
`bands` (list of ints):
Optional list of bands to read. If not specified, all bands
are read.
`use_memmap` (bool, default True):
Specifies whether the file's memmap interface should be used
to read the data. Setting this arg to True only has an effect
if a memmap is being used (i.e., if `img.using_memmap` is True).
Returns:
:class:`numpy.ndarray`
An `MxNxL` array.
'''
if self._memmap is not None and use_memmap is True:
if bands is None:
data = np.array(self._memmap[:, row_bounds[0]: row_bounds[1],
col_bounds[0]: col_bounds[1]])
else:
data = np.array(
self._memmap[bands, row_bounds[0]: row_bounds[1],
col_bounds[0]: col_bounds[1]])
data = data.transpose((1, 2, 0))
if self.scale_factor != 1:
data = data / float(self.scale_factor)
return data
nSubRows = row_bounds[1] - row_bounds[0] # Rows in sub-image
nSubCols = col_bounds[1] - col_bounds[0] # Cols in sub-image
f = self.fid
f.seek(self.offset, 0)
# Increments between bands
if bands is None:
# Read all bands.
bands = list(range(self.nbands))
arr = np.zeros((nSubRows, nSubCols, len(bands)), dtype=self.dtype)
nrows = self.nrows
ncols = self.ncols
sampleSize = self.sample_size
bandSize = nrows * ncols * sampleSize
colStartOffset = col_bounds[0] * sampleSize
rowSize = ncols * sampleSize
rowStartOffset = row_bounds[0] * rowSize
nSubBands = len(bands)
# Pixel format is BSQ
for i in bands:
vals = array.array(byte_typecode)
bandOffset = i * bandSize
for j in range(row_bounds[0], row_bounds[1]):
f.seek(self.offset
+ bandOffset
+ j * rowSize
+ colStartOffset, 0)
vals.fromfile(f, nSubCols * sampleSize)
subArray = np.frombuffer(tobytes(vals),
dtype=self.dtype).reshape((nSubRows,
nSubCols))
arr[:, :, i] = subArray
if self.scale_factor != 1:
return arr / float(self.scale_factor)
return arr
def read_subimage(self, rows, cols, bands=None, use_memmap=False):
'''
Reads arbitrary rows, columns, and bands from the image.
Arguments:
`rows` (list of ints):
Indices of rows to read.
`cols` (list of ints):
Indices of columns to read.
`bands` (list of ints):
Optional list of bands to read. If not specified, all bands
are read.
`use_memmap` (bool, default False):
Specifies whether the file's memmap interface should be used
to read the data. Setting this arg to True only has an effect
if a memmap is being used (i.e., if `img.using_memmap` is True).
Returns:
:class:`numpy.ndarray`
An `MxNxL` array, where `M` = len(`rows`), `N` = len(`cols`),
and `L` = len(bands) (or # of image bands if `bands` == None).
'''
if self._memmap is not None and use_memmap is True:
if bands is None:
data = np.array(self._memmap[:].take(rows, 1).take(cols, 2))
else:
data = np.array(
self._memmap.take(bands, 0).take(rows, 1).take(cols, 2))
data = data.transpose((1, 2, 0))
if self.scale_factor != 1:
data = data / float(self.scale_factor)
return data
nSubRows = len(rows) # Rows in sub-image
nSubCols = len(cols) # Cols in sub-image
d_col = self.sample_size
d_band = d_col * self.ncols
d_row = d_band * self.nbands
f = self.fid
f.seek(self.offset, 0)
# Increments between bands
if bands is None:
# Read all bands.
bands = list(range(self.nbands))
nSubBands = len(bands)
arr = np.zeros((nSubRows, nSubCols, nSubBands), dtype=self.dtype)
offset = self.offset
vals = array.array(byte_typecode)
nrows = self.nrows
ncols = self.ncols
sampleSize = self.sample_size
bandSize = nrows * ncols * sampleSize
sampleSize = self.sample_size
rowSize = ncols * sampleSize
# Pixel format is BSQ
for i in bands:
bandOffset = offset + i * bandSize
for j in rows:
rowOffset = j * rowSize
for k in cols:
f.seek(bandOffset
+ rowOffset
+ k * sampleSize, 0)
vals.fromfile(f, sampleSize)
arr = np.frombuffer(tobytes(vals), dtype=self.dtype)
arr = arr.reshape(nSubBands, nSubRows, nSubCols)
arr = np.transpose(arr, (1, 2, 0))
if self.scale_factor != 1:
return arr / float(self.scale_factor)
return arr
def read_datum(self, i, j, k, use_memmap=True):
'''Reads the band `k` value for pixel at row `i` and column `j`.
Arguments:
`i`, `j`, `k` (integer):
Row, column and band index, respectively.
`use_memmap` (bool, default True):
Specifies whether the file's memmap interface should be used
to read the data. Setting this arg to True only has an effect
if a memmap is being used (i.e., | |
numberOfErrors + 1
wrongChars = wrongChars + "w" + ", "
if guessChar == "X" or guessChar == "x" :
if word[1] == "X" or word[1] == "x" :
toGuess = toGuess[:1] + "x" + toGuess[2:]
if word[2] == "X" or word[2] == "x" :
toGuess = toGuess[:2] + "x" + toGuess[3:]
if word[3] == "X" or word[3] == "x" :
toGuess = toGuess[:3] + "x" + toGuess[4:]
if word[1] != "X" and word[1] != "x" and word[2] != "X" and word[2] != "x" and word[3] != "X" and word[3] != "x" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "x" + ", "
if guessChar == "Y" or guessChar == "y" :
if word[1] == "Y" or word[1] == "y" :
toGuess = toGuess[:1] + "y" + toGuess[2:]
if word[2] == "Y" or word[2] == "y" :
toGuess = toGuess[:2] + "y" + toGuess[3:]
if word[3] == "Y" or word[3] == "y" :
toGuess = toGuess[:3] + "y" + toGuess[4:]
if word[1] != "Y" and word[1] != "y" and word[2] != "Y" and word[2] != "y" and word[3] != "Y" and word[3] != "y" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "y" + ", "
if guessChar == "Z" or guessChar == "z" :
if word[1] == "Z" or word[1] == "z" :
toGuess = toGuess[:1] + "z" + toGuess[2:]
if word[2] == "Z" or word[2] == "z" :
toGuess = toGuess[:2] + "z" + toGuess[3:]
if word[3] == "Z" or word[3] == "z" :
toGuess = toGuess[:3] + "z" + toGuess[4:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" and word[3] != "Z" and word[3] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 2 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 3 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 4 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 5 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t| / ")
print("\t|")
print("\t|")
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" and word[3] != "F" and word[3] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[3] == "G" or word[3] == "g" :
toGuess = toGuess[:3] + "g" + toGuess[4:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" and word[3] != "G" and word[3] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[3] == "H" or word[3] == "h" :
toGuess = toGuess[:3] + "h" + toGuess[4:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" and word[3] != "H" and word[3] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[3] == "I" or word[3] == "i" :
toGuess = toGuess[:3] + "i" + toGuess[4:]
if word[1] != "I" and word[1] != "i" and word[2] != "I" and word[2] != "i" and word[3] != "I" and word[3] != "i" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "i" + ", "
if guessChar == "J" or guessChar == "j" :
if word[1] == "J" or word[1] == "j" :
toGuess = toGuess[:1] + "j" + toGuess[2:]
if word[2] == "J" or word[2] == "j" :
toGuess = toGuess[:2] + "j" + toGuess[3:]
if word[3] == "J" or word[3] == "j" :
toGuess = toGuess[:3] + "j" + toGuess[4:]
if word[1] != "J" and word[1] != "j" and word[2] != "J" and word[2] != "j" and word[3] != "J" and word[3] != "j" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "j" + ", "
if guessChar == "K" or guessChar == "k" :
if word[1] == "K" or word[1] == "k" :
toGuess = toGuess[:1] + "k" + toGuess[2:]
if word[2] == "K" or word[2] == "k" :
toGuess = toGuess[:2] + "k" + toGuess[3:]
if word[3] == "K" or word[3] == "k" :
toGuess = toGuess[:3] + "k" + toGuess[4:]
if word[1] != "K" and word[1] != "k" and word[2] != "K" and word[2] != "k" and word[3] != "K" and word[3] != "k" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "k" + ", "
if guessChar == "L" or guessChar == "l" :
if word[1] == | |
#*****************************************************************************#
#* Copyright (c) 2004-2008, SRI International. *#
#* All rights reserved. *#
#* *#
#* Redistribution and use in source and binary forms, with or without *#
#* modification, are permitted provided that the following conditions are *#
#* met: *#
#* * Redistributions of source code must retain the above copyright *#
#* notice, this list of conditions and the following disclaimer. *#
#* * Redistributions in binary form must reproduce the above copyright *#
#* notice, this list of conditions and the following disclaimer in the *#
#* documentation and/or other materials provided with the distribution. *#
#* * Neither the name of SRI International nor the names of its *#
#* contributors may be used to endorse or promote products derived from *#
#* this software without specific prior written permission. *#
#* *#
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *#
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *#
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *#
#* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *#
#* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *#
#* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *#
#* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *#
#* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *#
#* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *#
#* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *#
#* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *#
#*****************************************************************************#
#* "$Revision:: 128 $" *#
#* "$HeadURL:: https://svn.ai.sri.com/projects/spark/trunk/spark/src/spar#$" *#
#*****************************************************************************#
from spark.internal.version import *
from spark.internal.parse.processing import init_builtin
# from spark.internal.common import PM, DEBUG
# from spark.internal.exception import LowError
# from spark.internal.persist import persist_unit, persist_module
# import os
# import sys
# from spark.internal.source_locator import get_source_locator
# from spark.internal.parse.basicvalues import Symbol, isSymbol
# from spark.internal.repr.common_symbols import BUILTIN_MODPATH
# from spark.internal.repr.newterm import *
# from spark.internal.repr.newkinds import *
# from spark.internal.repr.newbuild import build_expr, HasSymInfo
# from spark.internal.repr.varbindings import NULL_EXPR_BINDINGS
# from spark.internal.repr.expr import Expr
# from spark.internal.repr.predexpr import PredExpr, SimplePredExpr
# from spark.internal.parse.basicvalues import ValueInt, value_str, Structure
# pm = PM(__name__)
# debug = DEBUG(__name__)#.on()
# # def parse(rule, text, info):
# # P = SPARK(SPARKScanner(text), info)
# # return wrap_error_reporter(P, rule)
# def parse(rule, text, unit):
# """internal parse handler. this should not be called externally"""
# return parseTerms(rule, text, unit)
# def read_file(abspathname, filename): # test routine
# module = None
# file = open(filename, 'rb')
# text = file.read()
# file.close()
# unit = SparklFileUnit(module, Symbol(abspathname), text, filename)
# return parse("main", text, unit)
# ################################################################
# # Load ID map/counter
# #
# # Each _saved_ SparklUnit object has a unique load id (sequentially
# # increasing) that is used to determine how to resume modules
# # correctly. It is also used in generating IDs for SPARK objects
# # - we don't have to persist/resume this as the persistence layer
# # implicitly initializes this by fixing the modpath load order
# # on resume.
# SPARKL_UNITS={}
# MAX_LOADID=-1
# def _save_sparkl_unit(unit, forceLoadid):
# """if forceLoadid > -1, then it will be used as the loadid instead. Otherwise, a monotonically
# increasing loadid will be assigned. Returns the loadid assigned to the unit"""
# global MAX_LOADID
# if forceLoadid > -1:
# SPARKL_UNITS[forceLoadid] = unit
# if forceLoadid > MAX_LOADID:
# MAX_LOADID = forceLoadid
# return forceLoadid
# else:
# MAX_LOADID += 1
# SPARKL_UNITS[MAX_LOADID] = unit
# return MAX_LOADID
# def fromIdGetUnit(id):
# return SPARKL_UNITS[id]
# ################################################################
# # Sparkl Unit object
# #
# # Represents a file or other unit of SPARK-L text source
# class SparklUnit(object):
# __slots__ = (
# "_module",
# "_externals", # list of used modules and imports
# "_includes", # list of included filepaths
# "_filepath", # absolute path of the file
# "_text", # text of file
# "_exprs", # exprs of file (includes syminfos)
# "_syminfos", # dict: idname -> syminfo
# "_export_pathexprs", # list of pathexprs
# "_export_dict", # dict: idname -> syminfo
# # the id of unit: this is not necessarily unique (can reload a unit)
# "unitid",
# # order in which this unit was loaded (unique per _saved_ SparklUnit object)
# "loadid",
# "_nextExprId",
# "_processedExprs", # like _exprs but includes processed closures
# )
# def __init__(self, module, filepath, text, unitid, saveSparklUnit, forceLoadid=-1):
# if not (isinstance(module, Module)): raise AssertionError
# if not (isSymbol(filepath)): raise AssertionError
# self._module = module
# self._externals = []
# self._includes = []
# self._text = text
# self._exprs = []
# self._filepath = filepath
# self._syminfos = {}
# self._export_pathexprs = []
# self.unitid = unitid
# self._nextExprId = 1
# self._processedExprs = []
# if saveSparklUnit:
# self.loadid = _save_sparkl_unit(self, forceLoadid)
# persist_unit(module, self)
# else:
# self.loadid = -1 #unsaved sparklunits always get ID -1
# def getNextExprId(self):
# id = self._nextExprId
# self._nextExprId = id + 1
# return id
# def getCurrentExprId(self):
# return self._nextExprId -1
# def addProcessedExpr(self, expr):
# # mostly added in order but not always
# processed = self._processedExprs
# self._processedExprs.append(expr)
# def required_modpaths(self):
# return [ext.get_modpath() for ext in self._externals]
# def get_module(self):
# return self._module
# def get_filepath(self):
# return self._filepath
# def get_filename(self): # default
# return self._filepath.name
# def get_exprs(self):
# return self._exprs
# def get_text(self):
# return self._text
# def idname_syminfo(self, idname):
# return self._syminfos.get(idname, None)
# def path_syminfo(self, path):
# """If there is a unique known symbol associated with the term
# via a using or import, return it. If there is none, return
# None. If there are multiple, raise an exception. This relies
# on the imported and used modules having all their symbols
# declared, but does not require this module's symbols to be
# declared."""
# idname = path.id
# modname = path.modname
# if modname is None:
# syminfo = self._syminfos.get(idname, None)
# for external in self._externals:
# a_syminfo = external.idname_syminfo(idname)
# if a_syminfo is not None:
# if syminfo is None:
# syminfo = a_syminfo
# elif syminfo != a_syminfo:
# raise LowError("%s is ambiguous, could be %s or %s",\
# idname, syminfo.symbol().name, \
# a_syminfo.symbol().name)
# return syminfo
# else:
# syminfo = None
# if modname == self._module.get_modpath().name:
# return self._syminfos.get(idname, None)
# else:
# for external in self._externals:
# if modname == external.get_modpath().name:
# return external.idname_syminfo(idname)
# raise LowError("Cannot access module of path %s here", path.name)
# def parse_and_build(self):
# terms = parse("main", self._text, self)
# for term in terms:
# if isinstance(term, TaggedItem):
# expr = build_expr(STATEMENT, term)
# elif isinstance(term, BraceTerm):
# expr = build_expr(STATEMENT, term)
# elif isinstance(term, ParenTerm):
# expr = build_expr(PREDEXPR, term)
# else:
# raise term.error("Expecting a command, statement, or fact")
# self.add_expr(expr)
# def add_using(self, module):
# self._externals.append(module)
# def add_includes(self, filepath):
# self._includes.append(filepath)
# def add_import(self, module, syminfos):
# modpath = module.get_modpath()
# for external in self._externals:
# if external.get_modpath() == modpath:
# break
# else:
# #print "Importing %s into %s"%(modpath.name, self._filepath.name)
# external = ImportExternal(modpath)
# self._externals.append(external)
# for syminfo in syminfos:
# external.add_syminfo(syminfo)
# def add_syminfo(self, syminfo):
# symbol = syminfo.symbol()
# modname = self._module.get_modpath().name
# if symbol.modname != modname:
# raise LowError("Attempting to define symbol %s in module %s", \
# symbol, modname)
# if self._syminfos.has_key(symbol.id):
# raise LowError("Attempting to redefine symbol %s", symbol)
# debug("Adding syminfo %s", syminfo)
# self._syminfos[symbol.id] = syminfo
# def add_exports(self, pathexprs):
# self._export_pathexprs = self._export_pathexprs + list(pathexprs)
# def process_exports(self): # do this after all units parsed and built
# export_dict = {}
# for pathexpr in self._export_pathexprs:
# if pathexpr is None: # special case for exportall
# # export all local declarations
# for syminfo in self._syminfos.values():
# sym = syminfo.symbol()
# try:
# if export_dict[sym.id] != syminfo:
# raise LowError("Id %s already exported as %s"%(sym.id,sym))
# except KeyError:
# export_dict[sym.id] = syminfo
# continue
# syminfo = self.path_syminfo(pathexpr.get_value())
# if syminfo is None:
# raise pathexpr.error("No declaration found")
# sym = syminfo.symbol()
# try:
# if export_dict[sym.id] != syminfo:
# raise LowError("Id %s already exported as %s"%(sym.id,sym))
# except KeyError:
# export_dict[sym.id] = syminfo
# self._export_dict = export_dict
# def get_exports(self):
# "Return a dict mapping idname to syminfos"
# return self._export_dict
# def add_expr(self, expr):
# if expr is None:
# return
# if isinstance(expr, HasSymInfo):
# try:
# self.add_syminfo(expr.get_syminfo())
# except LowError, err:
# pm.set()
# raise err.error(expr)
# elif not isinstance(expr, Expr):
# raise LowError("Cannot add this to the unit's exprs %r", expr)
# self._exprs.append(expr)
# def load_decls_into_agent(self, agent):
# debug("Loading decls into agent %s", agent)
# for expr in self.get_exprs():
# debug(" loading expr %s", expr)
# if isinstance(expr, HasSymInfo):
# syminfo = expr.get_syminfo()
# debug(" Loading syminfo %s", syminfo)
# agent.set_syminfo(syminfo)
# def load_facts_into_agent(self, agent):
# debug("Loading facts into agent %s", agent)
# for expr in self.get_exprs():
# debug(" loading expr %s", expr)
# if isinstance(expr, PredExpr):
# debug(" Loading fact %s", expr)
# try:
# expr.predexpr_conclude(agent, NULL_EXPR_BINDINGS)
# # if isinstance(expr, SimplePredExpr) \
# # and expr.predsym.id == "InformCalled":
# # from spark.internal.common import mybreak
# # expr1 = expr
# # try:
# # raise Exception("BREAK %s"%expr)
# # except AnyException:
# # pm.setprint()
# except LowError, err:
# pm.set()
# raise err.error(expr)
# ################################################################
# # | |
with the getCounterparts
def getLinkedEntitiesByTemplateType(self, splitPath, isMeme, linkType = 0, forcedContinue = False, excludeLinks = [], returnUniqueValuesOnly = True, excludeCluster = []):
    """ This is a critically important method for finding associated (linked) entities.  It parses the
    link path and follows each step of the path in turn by a recursive call.
    It also handles single star and double star wildcards.
    * (single star) - is a one step wildcard.  The recursive call will be made on every counterpart of the current step.
        Use this wildcard if you know how many steps are required to get to a particular entity, but not the intermediate
        templates (meme or metameme).
    ** (double star) - this is a multistep wildcard.  The recursive call will be made as many times as needed to get to
        the first entity that matches the pattern after the double star.  E.g. '**::SomeTemplate' would result in
        recursive calls to scour entity links until 'SomeTemplate' is found.

    splitPath - the link path (in link path syntax) of the sought entity's meme or metameme.
    isMeme - this boolean determines whether we compare the memePath or metaMeme properties of the entities
    linkType - the entity link type
    forcedContinue - If a double wildcard turns up in currentPathFragment (see below), then we have a wildcard search
        with an unknown number of degrees of separation.  If this is true, we'll pass the recursion to every link
        with a forced continue until we get our entity.
    excludeLinks - link IDs already traversed; extended on each recursion to prevent re-walking the same link.
    returnUniqueValuesOnly - If set to True, this method will filter returnMembers duplicates.
    excludeCluster - This is an option for increasing performance on traverses; at the potential cost of fidelity.
        If this parameter is a list and not None, whenever we traverse entity 1+n in a cluster, we can never cross
        back over an entity that we already traversed.

    NOTE(review): excludeLinks and excludeCluster use mutable default arguments ([]).  Both are
        mutated in this method (append/extend), so state leaks across top-level calls that rely on
        the defaults — TODO confirm intent and consider a None-sentinel fix.
    """
    method = moduleName + '.' + self.className + '.getLinkedEntitiesByTemplateType'
    #logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    selfUUIDAsStr = str(self.uuid)
    # Record ourselves in the cluster-exclusion list so recursions never backtrack over us.
    if excludeCluster is not None:
        excludeCluster.append(selfUUIDAsStr)
    #Debug aid- Only used for logging in the event of a KeyError exception
    oldSplitPath = copy.deepcopy(splitPath)
    returnMembers = []
    soughtPath = None
    currentPathFragment = None
    # The three traverse-direction separators understood by link path syntax.
    forwardTraverseJoin = '>>'
    backwardTraverseJoin = '<<'
    polydirectionalTraverseJoin = '::'
    #Start by determining whether or not we have a leading direction indicator.
    #If so, then set the direction to search for currPath and then remove the leading linkdir
    soughtPathDirection = linkDirectionTypes.BIDIRECTIONAL #by default
    if splitPath.startswith(forwardTraverseJoin) == True:
        soughtPathDirection = linkDirectionTypes.OUTBOUND
        splitPath = splitPath[2:]
    elif splitPath.startswith(backwardTraverseJoin) == True:
        soughtPathDirection = linkDirectionTypes.INBOUND
        splitPath = splitPath[2:]
    elif splitPath.startswith(polydirectionalTraverseJoin) == True:
        splitPath = splitPath[2:]
    #determine which traverse direction we have in splitPath
    # Find whichever separator occurs FIRST in the remaining path; that one
    # partitions the current step from the rest of the path.
    partitionSequence = polydirectionalTraverseJoin
    lowestIndex = -1
    forwardIndex = -1
    reverseIndex = -1
    polydirectionalIndex = -1
    try:
        forwardIndex = splitPath.index('>>')
    except: pass
    try:
        reverseIndex = splitPath.index('<<')
    except: pass
    try:
        polydirectionalIndex = splitPath.index('::')
        lowestIndex = polydirectionalIndex
    except: pass
    if (forwardIndex > -1):
        if (forwardIndex < lowestIndex) or\
            ((forwardIndex > lowestIndex) and (lowestIndex < 0)):
            lowestIndex = forwardIndex
            partitionSequence = forwardTraverseJoin
    if ((reverseIndex > -1) or (reverseIndex == 0)):
        if (reverseIndex < lowestIndex) or\
            ((reverseIndex > lowestIndex) and (lowestIndex < 0)):
            lowestIndex = reverseIndex
            partitionSequence = backwardTraverseJoin
    #If forcedContinue is true, we don't bother splitting the path as there was a double wildcard in the recursion history
    #  somewhere.  We'll just accept splitPath as it is.
    if forcedContinue == False:
        repartitionedSplitPath = splitPath.partition(partitionSequence)
        currentPathFragment = repartitionedSplitPath[0]
        if ((len(repartitionedSplitPath[2]) > 0) and (len(repartitionedSplitPath[1]) > 0)):
            # Keep the separator on the front of the remainder so the recursion
            # sees the direction indicator for the next step.
            splitPath = "%s%s" %(repartitionedSplitPath[1], repartitionedSplitPath[2])
        else:
            splitPath = repartitionedSplitPath[2]
    #Peel off the parameter filters from currentPathFragment
    linkParams, nodeParams = self.getTraverseFilters(currentPathFragment)
    reOuterParentheses = re.compile(r"\((.+)\)")
    reInnerBrackets = re.compile(r"\[([^]]*)\]")
    #strip off the bits inside parentheses and brackets
    currentPathFragment = re.sub(reOuterParentheses, '', currentPathFragment)
    currentPathFragment = re.sub(reInnerBrackets, '', currentPathFragment)
    # If currentPathFragment is a double wildcard, turn on forcedContinue
    if currentPathFragment == "**":
        forcedContinue = True
    # If currentPathFragment is a wildcard, don't bother trying to resolve soughtPath as we're not trying to resolve
    #   anything in this step.  Soughtpath is the template path of the next child in the link tree.
    # If currentPathFragment is a single wildcard, then we will examine all children on this step.
    # If forcedContinue is True, then we are trying to resolve a soughtPath in any case because we don't know how many
    #   degrees of separation we have.
    # If we lead with a unidirectional directional relationship, handle appropriately (see below)
    if (forcedContinue == True) or (currentPathFragment != "*"):
        try:
            if isMeme == True:
                try:
                    soughtPath = templateRepository.resolveTemplate(self.memePath, currentPathFragment, True)
                except Exceptions.TemplatePathError as e:
                    # Unresolvable fragments are only fatal when we are NOT in a
                    # forced-continue (double wildcard) scan.
                    if forcedContinue == False:
                        errorMsg = "Failed to resolve path relative to %s. Nested Traceback = %s" %(self.memePath, e)
                        logQ.put( [logType , logLevel.WARNING , method , errorMsg])
                        raise e
            else:
                #We only the fullPemplatePath attribute of the entity, not the actual path pointer
                metaMeme = templateRepository.resolveTemplateAbsolutely(self.metaMeme)
                soughtPath = templateRepository.resolveTemplate(metaMeme.path, currentPathFragment, True)
        except Exception as e:
            if forcedContinue == False:
                errorMsg = "Failed to resolve path relative to %s. Nested Traceback = %s" %(self.memePath, e)
                logQ.put( [logType , logLevel.WARNING , method , errorMsg])
                raise e
    try:
        #linkDirectionTypes.BIDIRECTIONAL, '', None, linkAttributeOperatorTypes.EQUAL
        members = linkRepository.getCounterparts(self.uuid, soughtPathDirection, linkParams, nodeParams, linkType, excludeLinks)
        if excludeCluster is not None:
            #we need to make sure that we don't backtrack, so filter the exclude list
            memberSet = set(members)
            excludeSet = set(excludeCluster)
            memberSet.difference_update(excludeSet)
            members = list(memberSet)
        if (oldSplitPath == "*") and (splitPath == ""):
            #We have a wildcard end effector on the traverse path.  Just return members and be done with it
            returnMembers = members
        else:
            # Mark our own links as visited before recursing so children can't walk back through them.
            newExcludeLinks = self.getLinkIDs()
            excludeLinks.extend(newExcludeLinks)
            for memberEntityID in members:
                member = entityRepository.getEntity(memberEntityID)
                isSingleton = member.getIsSingleton()
                if soughtPath is not None:
                    #we are searching for a specific template
                    if ((isMeme == True) and (member.memePath.fullTemplatePath == soughtPath.path.fullTemplatePath)) or\
                        (member.metaMeme == soughtPath.path.fullTemplatePath):
                        if len(splitPath) > 0:
                            #splitPath, isMeme, linkType = 0, forcedContinue = False, excludeLinks = []
                            partialRet = member.getLinkedEntitiesByTemplateType(splitPath, isMeme, linkType, False, excludeLinks, returnUniqueValuesOnly, excludeCluster)
                            returnMembers.extend(partialRet)
                        else:
                            # End of path and template matched: this member IS a result.
                            returnMembers.append(member.uuid)
                    if (forcedContinue == True) and (isSingleton == False):
                        partialRet = member.getLinkedEntitiesByTemplateType(splitPath, isMeme, linkType, forcedContinue, excludeLinks, returnUniqueValuesOnly, excludeCluster)
                        returnMembers.extend(partialRet)
                else:
                    #currentPathFragment is a wildcard.  Therefore soughtPath is None
                    #  In this case, we follow ALL the rabbit holes looking for our next hit, but with forcedContinue turned off
                    partialRet = member.getLinkedEntitiesByTemplateType(splitPath, isMeme, linkType, False, excludeLinks, returnUniqueValuesOnly, excludeCluster)
                    returnMembers.extend(partialRet)
    except KeyError as e:
        # NOTE(review): a KeyError anywhere in the traverse is silently swallowed — presumably
        # "entity disappeared / not found" is treated as best-effort; confirm before tightening.
        #self.getLinkedEntitiesByTemplateType(oldSplitPath, isMeme, linkType, forcedContinue, excludeLinks, returnUniqueValuesOnly, excludeCluster)
        pass
    except Exception as e:
        # NOTE(review): broad best-effort swallow; failures to traverse simply yield fewer results.
        #logQ.put( [logType , logLevel.DEBUG , method , "Failure getting linked entities. Traceback = %s" %e])
        pass
    #logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    if returnUniqueValuesOnly == True:
        streamlinedReturnMembers = filterListDuplicates(returnMembers)
        return streamlinedReturnMembers
    else:
        return returnMembers
#TODO - propagate the method additions to the script facade
def getLinkedEntityByMemeTag(self, tag, memBershipType = 0, direction = linkDirectionTypes.BIDIRECTIONAL):
    """Return the uuids of counterpart entities whose meme carries the tag `tag`.

    :param tag: the tag string to match against each counterpart's `tags` collection
    :param memBershipType: passed through to linkRepository.getCounterparts
        (NOTE(review): argument order to getCounterparts differs from
        getLinkedEntitiesByTemplateType's call — confirm against the repository API)
    :param direction: link direction to follow (default: BIDIRECTIONAL)
    :return: list of uuids of matching counterpart entities
    """
    returnMembers = []
    members = linkRepository.getCounterparts(self.uuid, direction, '', None, linkAttributeOperatorTypes.EQUAL, memBershipType)
    for memberEntityID in members:
        member = entityRepository.getEntity(memberEntityID)
        # BUGFIX: a stray second call `entityRepository.getEntity(uuid)` overwrote `member`
        # with a lookup on the undefined name `uuid`; it has been removed so the entity
        # fetched by memberEntityID is actually the one whose tags are inspected.
        for entityTag in member.tags:
            if entityTag == tag:
                returnMembers.append(member.uuid)
    return returnMembers
def addDecimalProperty(self, name, value, constrained = None, restMin = None, restMax = None, restList = None, memePath = None):
""" A method for adding ad hoc properties after entity creation """
#method = moduleName + '.' + self.className + '.addIntegerProperty'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
if self.getHasProperty(name) != True:
decValue = decimal.Decimal(value)
newprop | |
# ---- tests/test_style_api.py ----
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
import pytest
from datacube_ows.styles.api import (StandaloneStyle, apply_ows_style,
apply_ows_style_cfg, create_geobox,
generate_ows_legend_style,
generate_ows_legend_style_cfg,
xarray_image_as_png)
def test_indirect_imports():
    """The styles API should re-export these helpers; confirm they resolved."""
    for api_obj in (xarray_image_as_png, create_geobox):
        assert api_obj is not None
@pytest.fixture
def simple_rgb_style_cfg():
    """Minimal RGB component style: each channel is an identity mapping of its band."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["red", "green", "blue"],
        "scale_factor": 1.0,
    }
    cfg["components"] = {band: {band: 1.0} for band in ("red", "green", "blue")}
    return cfg
@pytest.fixture
def simple_rgb_perband_scaling_style_cfg():
    """RGB style where red and green override the global scale_range per band."""
    components = {
        "red": {"red": 1.0, "scale_range": [0, 200]},
        "green": {"green": 1.0, "scale_range": [0, 500]},
        "blue": {"blue": 1.0},
    }
    return {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["red", "green", "blue"],
        "components": components,
        "scale_range": [0, 350],
    }
def test_component_style(dummy_raw_data, null_mask, simple_rgb_style_cfg):
    """Identity component style should pass raw band values straight through."""
    style = StandaloneStyle(simple_rgb_style_cfg)
    mask = style.to_mask(dummy_raw_data, null_mask)
    result = style.transform_data(dummy_raw_data, mask)
    expected = {"red": 5, "green": 7, "blue": 2}
    for channel, value in expected.items():
        assert channel in result.data_vars.keys()
        assert result[channel].values[0][0] == value
def test_perband_component_style(dummy_raw_data, null_mask, simple_rgb_perband_scaling_style_cfg):
    """Per-band scale ranges should still yield all three output channels."""
    style = StandaloneStyle(simple_rgb_perband_scaling_style_cfg)
    mask = style.to_mask(dummy_raw_data, null_mask)
    result = style.transform_data(dummy_raw_data, mask)
    assert all(ch in result.data_vars.keys() for ch in ("red", "green", "blue"))
@pytest.fixture
def simple_ramp_style_cfg():
    """Colour ramp over norm_diff(ir, red); transparent just below zero."""
    ramp = [
        {"value": -0.00000001, "color": "#000000", "alpha": 0.0},
        {"value": 0.0, "color": "#000000", "alpha": 1.0},
    ]
    ramp += [
        {"value": v, "color": c}
        for v, c in (
            (0.2, "#FF00FF"),
            (0.4, "#00FF00"),
            (0.5, "#FFFF00"),
            (0.6, "#0000FF"),
            (0.8, "#00FFFF"),
            (1.0, "#FFFFFF"),
        )
    ]
    return {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "index_function": {
            "function": "datacube_ows.band_utils.norm_diff",
            "mapped_bands": True,
            "kwargs": {"band1": "ir", "band2": "red"},
        },
        "needed_bands": ["red", "ir"],
        "color_ramp": ramp,
    }
def test_ramp_style(dummy_raw_calc_data, raw_calc_null_mask, simple_ramp_style_cfg):
    """Ramp style should colour each sample point according to its index value."""
    style = StandaloneStyle(simple_ramp_style_cfg)
    result = apply_ows_style(style, dummy_raw_calc_data, valid_data_mask=raw_calc_null_mask)
    for channel in ("red", "green", "blue", "alpha"):
        assert channel in result.data_vars.keys()
    # Exact-colour points: 0 (idx=0.6) -> blue, 2 (idx=1.0) -> white, 4 (idx=0.0) -> black
    solid_points = {0: (0, 0, 255), 2: (255, 255, 255), 4: (0, 0, 0)}
    for idx, (r, g, b) in solid_points.items():
        assert result["alpha"].values[idx] == 255
        assert result["red"].values[idx] == r
        assert result["green"].values[idx] == g
        assert result["blue"].values[idx] == b
    # point 1 (idx < 0) maps to transparent
    assert result["alpha"].values[1] == 0
    # point 3 (idx=0.5) maps to yellow (green rounds to 254/255)
    assert result["alpha"].values[3] == 255
    assert result["red"].values[3] == 255
    assert result["green"].values[3] >= 254
    assert result["blue"].values[3] == 0
    # point 5 (idx=0.176) maps to somewhere between black and magenta
    assert result["alpha"].values[5] == 255
    assert result["green"].values[5] == 0
    assert abs(result["red"].values[5] - result["blue"].values[5]) <= 1
    assert 0 < result["red"].values[5] < 255
def test_ramp_expr_style(dummy_raw_calc_data, raw_calc_null_mask, simple_ramp_style_cfg):
    """Same ramp expectations, but driven by an index_expression string."""
    for key in ("index_function", "needed_bands"):
        del simple_ramp_style_cfg[key]
    simple_ramp_style_cfg["index_expression"] = "(ir-red)/(ir+red)"
    style = StandaloneStyle(simple_ramp_style_cfg)
    result = apply_ows_style(style, dummy_raw_calc_data, valid_data_mask=raw_calc_null_mask)
    for channel in ("red", "green", "blue", "alpha"):
        assert channel in result.data_vars.keys()
    # Exact-colour points: 0 (idx=0.6) -> blue, 2 (idx=1.0) -> white, 4 (idx=0.0) -> black
    solid_points = {0: (0, 0, 255), 2: (255, 255, 255), 4: (0, 0, 0)}
    for idx, (r, g, b) in solid_points.items():
        assert result["alpha"].values[idx] == 255
        assert result["red"].values[idx] == r
        assert result["green"].values[idx] == g
        assert result["blue"].values[idx] == b
    # point 1 (idx < 0) maps to transparent
    assert result["alpha"].values[1] == 0
    # point 3 (idx=0.5) maps to yellow (green rounds to 254/255)
    assert result["alpha"].values[3] == 255
    assert result["red"].values[3] == 255
    assert result["green"].values[3] >= 254
    assert result["blue"].values[3] == 0
    # point 5 (idx=0.176) maps to somewhere between black and magenta
    assert result["alpha"].values[5] == 255
    assert result["green"].values[5] == 0
    assert abs(result["red"].values[5] - result["blue"].values[5]) <= 1
    assert 0 < result["red"].values[5] < 255
def test_ramp_legend_standalone(simple_ramp_style_cfg):
    """A standalone ramp style should render a 400x125 RGBA legend."""
    legend = generate_ows_legend_style(StandaloneStyle(simple_ramp_style_cfg), 1)
    assert legend.mode == "RGBA"
    assert legend.size == (400, 125)
@pytest.fixture
def rgb_style_with_masking_cfg():
    """RGB style with two PQ masks: require splodgeless, and invert (ugly & impossible)."""
    masks = [
        {
            "band": "pq",
            "flags": {'splodgy': "Splodgeless"},
        },
        {
            "band": "pq",
            "flags": {"ugly": True, "impossible": "Woah!"},
            "invert": True,
        },
    ]
    return {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["red", "green", "blue"],
        "scale_range": (0.0, 1000.0),
        "components": {band: {band: 1.0} for band in ("red", "green", "blue")},
        "pq_masks": masks,
    }
def test_component_style_with_masking(dummy_raw_calc_data, raw_calc_null_mask, rgb_style_with_masking_cfg):
    """PQ masks should zero the alpha of the last three sample points."""
    result = apply_ows_style_cfg(rgb_style_with_masking_cfg, dummy_raw_calc_data, valid_data_mask=raw_calc_null_mask)
    for channel in ("red", "green", "blue", "alpha"):
        assert channel in result.data_vars.keys()
    expected_alphas = (255, 255, 255, 0, 0, 0)
    for i, expected in enumerate(expected_alphas):
        assert result["alpha"].values[i] == expected
@pytest.fixture
def simple_colormap_style_cfg():
    """Flag-based value_map style on band 'pq' with and/or rule combinations."""
    rules = [
        {
            "title": "Impossibly Tasty",
            "abstract": "Tasty AND Impossible",
            "flags": {
                "and": {"flavour": "Tasty", "impossible": "Woah!"},
            },
            "color": "#FF0000",
        },
        {
            "title": "Possibly Tasty",
            "abstract": "Tasty and Possible",
            "flags": {"impossible": "Woah!"},
            "color": "#00FF00",
        },
        {
            "title": "Ugly/Splodgy",
            "abstract": "Ugly or splodgy",
            "flags": {
                "or": {"ugly": True, "splodgy": "Splodgy"},
            },
            "color": "#0000FF",
        },
    ]
    return {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "value_map": {"pq": rules},
    }
def test_colormap_style(dummy_col_map_data, raw_calc_null_mask, simple_colormap_style_cfg):
    """Each sample point should pick up the colour of the first matching rule."""
    result = apply_ows_style_cfg(simple_colormap_style_cfg, dummy_col_map_data, valid_data_mask=raw_calc_null_mask)
    assert all(ch in result.data_vars.keys() for ch in ("red", "green", "blue", "alpha"))
    # point 0 falls through every rule -> transparent
    assert result["alpha"].values[0] == 0
    expected_rgb = {
        1: (255, 0, 0),  # tasty & impossible: red
        2: (0, 0, 255),  # splodgy or ugly: blue
        3: (0, 255, 0),  # bland & impossible: green
        4: (0, 0, 255),  # splodgy or ugly: blue
        5: (0, 255, 0),  # bland & impossible: green
    }
    for idx, (r, g, b) in expected_rgb.items():
        assert result["alpha"].values[idx] == 255
        assert result["red"].values[idx] == r
        assert result["green"].values[idx] == g
        assert result["blue"].values[idx] == b
@pytest.fixture
def enum_colormap_style_cfg():
    """Value_map style keyed on discrete 'pq' values rather than bit flags."""
    entries = [
        ("Blah", [8, 25], "#FF0000"),
        ("Rock and Roll", [4, 19, 25], "#00FF00"),
        ("", [23], "#0000FF"),
    ]
    return {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "value_map": {
            "pq": [
                {"title": title, "values": values, "color": color}
                for title, values, color in entries
            ]
        },
    }
def test_enum_colormap_style(dummy_col_map_data, raw_calc_null_mask, enum_colormap_style_cfg):
    """Discrete value_map: each point's pq value selects its rule's colour."""
    result = apply_ows_style_cfg(enum_colormap_style_cfg, dummy_col_map_data, valid_data_mask=raw_calc_null_mask)
    for channel in ("red", "green", "blue", "alpha"):
        assert channel in result.data_vars.keys()
    # point 0 (8) Blah - red
    # BUGFIX: these three colour assertions previously indexed values[1]
    # (copy/paste from the point-1 block), so point 0's colour was never tested.
    assert result["alpha"].values[0] == 255
    assert result["red"].values[0] == 255
    assert result["green"].values[0] == 0
    assert result["blue"].values[0] == 0
    # point 1 (25) Blah - red
    assert result["alpha"].values[1] == 255
    assert result["red"].values[1] == 255
    assert result["green"].values[1] == 0
    assert result["blue"].values[1] == 0
    # point 2 (10) - fall through, transparent
    assert result["alpha"].values[2] == 0
    # point 3 (19) - rnr green
    assert result["alpha"].values[3] == 255
    assert result["red"].values[3] == 0
    assert result["green"].values[3] == 255
    assert result["blue"].values[3] == 0
    # point 4 (4): rnr green
    assert result["alpha"].values[4] == 255
    assert result["red"].values[4] == 0
    assert result["green"].values[4] == 255
    assert result["blue"].values[4] == 0
    # point 5 (23): blue
    assert result["alpha"].values[5] == 255
    assert result["red"].values[5] == 0
    assert result["green"].values[5] == 0
    assert result["blue"].values[5] == 255
def test_ramp_legend(simple_colormap_style_cfg):
    """Legend from cfg renders as 300x125 RGBA.

    NOTE(review): despite the name, this exercises the colormap cfg, not a ramp —
    presumably a historical misnomer; confirm before renaming.
    """
    legend = generate_ows_legend_style_cfg(simple_colormap_style_cfg, 1)
    assert legend.mode == "RGBA"
    assert legend.size == (300, 125)
def test_api_none_mask(dummy_col_map_data, raw_calc_null_mask, simple_colormap_style_cfg):
    """Omitting valid_data_mask must give the same image as an all-valid mask."""
    with_mask = apply_ows_style_cfg(simple_colormap_style_cfg, dummy_col_map_data, valid_data_mask=raw_calc_null_mask)
    without_mask = apply_ows_style_cfg(simple_colormap_style_cfg, dummy_col_map_data)
    for channel in ("red", "green", "blue", "alpha"):
        for i in range(6):
            assert with_mask[channel].values[i] == without_mask[channel].values[i]
def test_landsat_like_configs(dummy_raw_ls_data, configs_for_landsat, null_mask):
    """Every landsat-like config should transform the dummy data without error."""
    for cfg in configs_for_landsat:
        standalone = StandaloneStyle(cfg)
        data_mask = standalone.to_mask(dummy_raw_ls_data, null_mask)
        assert standalone.transform_data(dummy_raw_ls_data, data_mask)
def test_wofs_like_configs(dummy_raw_wo_data, configs_for_wofs, null_mask):
    """Every WOfS-like config should transform the dummy data without error."""
    for cfg in configs_for_wofs:
        standalone = StandaloneStyle(cfg)
        data_mask = standalone.to_mask(dummy_raw_wo_data, null_mask)
        assert standalone.transform_data(dummy_raw_wo_data, data_mask)
def test_fc_wofs_like_configs(dummy_raw_fc_plus_wo, configs_for_combined_fc_wofs, null_mask):
    """Every combined FC+WOfS config should transform the dummy data without error."""
    for cfg in configs_for_combined_fc_wofs:
        standalone = StandaloneStyle(cfg)
        data_mask = standalone.to_mask(dummy_raw_fc_plus_wo, null_mask)
        assert standalone.transform_data(dummy_raw_fc_plus_wo, data_mask)
def test_multidate(xyt_dummydata, multi_date_cfg):
image = apply_ows_style_cfg(multi_date_cfg, xyt_dummydata)
assert len(image.x) == len(xyt_dummydata.x)
assert len(image.y) == len(xyt_dummydata.y)
assert | |
import random
from excel4lib.macro.excel4_macro_extension import *
from excel4lib.utils import *
from excel4lib.macro.excel4_instruction import *
from excel4lib.macro.excel4_argument import *
from excel4lib.macro.excel4_formula import *
from excel4lib.macro.excel4_value import *
from excel4lib.macro.excel4_variable import *
from excel4lib.exception import *
from excel4lib.config import *
from excel4lib.macro.excel4_result import *
from excel4lib.macro.excel4_register_formula import *
from excel4lib.sheet import *
class Excel4Obfuscator(Excel4MacroExtension):
name = "standard"
description = "Allows to obfuscate macro with standard Excel4.0 formulas suchas BITXOR, SUM, MID etc."
'''
Allows to obfuscate formulas, scatter them across worksheet, obfuscate variable names and values.
'''
def __init__(self, config=Excel4Config.obfuscator):
    '''
    Initializes the obfuscator.
    :param config: obfuscator configuration (defaults to the global Excel4Config.obfuscator)
    '''
    Excel4MacroExtension.__init__(self)
    # Obfuscator configuration
    self.config = config
    # List of char obfuscation methods
    self.ob_tech = []
    # Max length of cell (taken from the configured cell limit)
    self.cell_max_length = self.config.cell_limit
def _generate_noise(self, only_empty = False):
    '''
    Adds random string values to worksheet cells to pollute the sheet with decoys.
    Roughly 1 in 11 cells receives noise (random.randint(0,10) == 1).
    :param only_empty: flag that tells if noise is added only to empty (not reserved) cells
    '''
    # Walk through worksheet cell by cell
    for cords in self.worksheet.worksheet_iterate():
        # Choose whether to add noise to this cell or not
        if random.randint(0,10) != 1:
            continue
        noise = random_string(random.randint(4, 20))
        noise_cell = self._create_value(cords[0], cords[1], noise)
        # Check if cell is reserved
        if self.worksheet.is_reserved(cords[0], cords[1]):
            if only_empty:
                # Reserved cell and we may only touch empty ones - skip it
                continue
            # Check if obfuscation of the occupying cell is enabled
            cell = self.worksheet.get_cell(cords[0], cords[1])
            if not cell._spread or not cell._obfuscate:
                continue
            try:
                # Move occupying cell to the next free cell to make room for the noise
                self.worksheet.move_cell(cell)
            except CouldNotMoveCellException as ex:
                continue
        # Remember cursor, drop the noise cell in, then restore the cursor
        curr_cords = self.worksheet.get_current_cords()
        # Add noise
        self.worksheet.add_cell(noise_cell)
        self.worksheet.set_current_cords(curr_cords[0], curr_cords[1])
def _spread_formulas(self, trigger_x, trigger_y):
    '''
    Spreads formulas across cells in worksheet.
    Cells flagged as spreadable/obfuscatable are removed from their columns,
    a GOTO is planted at the trigger location pointing at the first call, and
    each column's cells are re-scattered via _spread_column.
    :param trigger_x: number of column in which first call is placed
    :param trigger_y: number of row in which first call is placed
    '''
    # Get current cords. We need to remember current cords because we will want to return execution to this cell.
    cords_backup = self.worksheet.get_current_cords()
    # cells_cache maps column number -> {row number -> cell} for all spreadable cells
    cells_cache = {}
    # Get cells to spread
    # For each column in worksheet
    for t in self.worksheet.column_iterate():
        # Get column number
        c_num = t[0]
        # Get cells in column
        cells_temp = t[1]
        if not cells_temp:
            continue
        values = cells_temp.values()
        # For each cell in column
        for cell in values:
            # Check if obfuscation of cell/formula is enabled
            if (not cell._spread) or (not cell._obfuscate):
                continue
            # Save cell in cache (EAFP: create the per-column dict on first KeyError)
            try:
                cells_cache[c_num][cell.y] = cell
            except KeyError:
                cells_cache[c_num] = {cell.y : cell}
    # Remove cells from worksheet
    # For each column in cache
    for c in cells_cache.keys():
        for cell in cells_cache[c].values():
            # Remove cell from worksheet
            # x,y of cell will be changed, and cell will be placed at another cords
            self.worksheet.remove_cell(cell)
    trigger_cell = None
    # Add jump to first call
    if trigger_x in cells_cache:
        # Find first call at or below the trigger row and plant a GOTO to it
        for row in cells_cache[trigger_x]:
            if row >= trigger_y:
                trigger_cell = self._go_to(trigger_x, row, cells_cache[trigger_x][row])
                self.worksheet.add_cell(trigger_cell)
                break
    # Spread cells across worksheet
    # For each column in cache
    for c in cells_cache.keys():
        self._spread_column(list(cells_cache[c].values()), trigger_x, trigger_y, trigger_cell)
    # Restore original cords
    self.worksheet.set_current_cords(cords_backup[0], cords_backup[1])
def _spread_column(self, cells, trigger_x, trigger_y, trigger_cell):
    '''
    Spread `cells` across the worksheet in random runs, chaining the runs
    together with GOTO formulas and finishing with a RETURN formula.
    :param cells: list of cells that are in the same column
    :param trigger_x: auto_open or auto_close function column
    :param trigger_y: auto_open or auto_close function row
    :param trigger_cell: auto_open or auto_close cell
    '''
    # Number of cells
    cells_num = len(cells)
    # The number of formulas that have already been spread across the sheet
    cnt = 0
    # Counts failed placement attempts; after 1000 a fresh column is grown
    fail_cnt = 0
    # Stack of (column, row) cords where the matching NEXT() of an open loop must land
    for_loop_cache = []
    if not cells:
        return
    while cnt < cells_num:
        # Generate random cords
        # If all columns are reserved then add new one and place payload there
        if fail_cnt > 1000:
            self.config.spread_x_max = self.config.spread_x_max + 1
            target_x = self.config.spread_x_max
        else:
            target_x = random.randint(self.config.spread_x_min, self.config.spread_x_max)
        target_y = random.randint(self.config.spread_y_min, self.config.spread_y_max)
        # Space between auto_open/auto_close cell and first call should be empty
        if(target_x == trigger_x) and (target_y in range(trigger_y, trigger_cell.y)):
            continue
        # If the same coordinates are drawn then randomize again
        if (target_x == self.worksheet._curr_x) and (target_y == self.worksheet._curr_y):
            # Inc failure counter
            fail_cnt = fail_cnt + 1
            continue
        # Random run length for this placement
        height = random.randint(1, cells_num - cnt)
        # Check if cells are free
        # We need to add 1 to height since we need additional cell for GOTO formula
        if self.worksheet.is_reserved(target_x, target_y, height + 1 + 1 + 1):
            # Inc failure counter
            fail_cnt = fail_cnt + 1
            continue
        self.worksheet.set_current_cords(target_x, target_y)
        cnt_h = cnt+height
        # Add random number of cells to worksheet at random cords
        for cell in cells[cnt:cnt_h]:
            # Loops require end statement in the same column
            # So we need to place them in the same one
            if issubclass(type(cell), Excel4LoopFormula):
                # Save column and row number of this loop
                for_loop_cache.append((self.worksheet._curr_x, self.worksheet._curr_y + (cnt_h - cnt) + 2))
            elif issubclass(type(cell), Excel4EndLoopFormula):
                # End-of-loop formulas are handled below, outside this run
                break
            self.worksheet.add_next_cell(cell)
            cnt = cnt + 1
        # If there are more cells to spread
        if cnt < cells_num:
            # @HACK
            # If cells[cnt] is Excel4Variable then get_reference function will return variable name
            # But if this variable name is not defined or this variable name is not storing address of another cell
            # then we can't GOTO to this formula (we need address of this formula).
            # So to bypass this we need to add an empty Excel4Value, because then get_reference function will return address of cell
            if issubclass(type(cells[cnt]), Excel4Variable):
                empty = self._create_empty_formula(cells[cnt].x, cells[cnt].y)
                cells.insert(cnt, empty)
                cells_num = cells_num + 1
            # If there are more cells to spread, then redirect macro execution
            # to the next cell.
            self.worksheet.add_next_cell(self._go_to(-1, -1, cells[cnt]))
        else:
            break
        if issubclass(type(cells[cnt]), Excel4EndLoopFormula):
            if len(for_loop_cache) < 1:
                raise Excel4LoopFormulaMissing("Excel4EndLoopFormula detected but Excel4LoopFormula is missing. Excel4 requires that the loops and NEXT() formula be in the same column.")
            # Place the NEXT() in the column recorded when its loop was emitted
            cords = for_loop_cache.pop()
            cells[cnt].x = cords[0]
            cells[cnt].y = cords[1]
            self.worksheet.add_cell(cells[cnt])
            cnt = cnt + 1
            # If there are more cells to spread
            if cnt < cells_num:
                # @HACK
                # If cells[cnt] is Excel4Variable then get_reference function will return variable name
                # But if this variable name is not defined or this variable name is not storing address of another cell
                # then we can't GOTO to this formula (we need address of this formula).
                # So to bypass this we need to add an empty Excel4Value, because then get_reference function will return address of cell
                if issubclass(type(cells[cnt]), Excel4Variable):
                    empty = self._create_empty_formula(cells[cnt].x, cells[cnt].y)
                    cells.insert(cnt, empty)
                    cells_num = cells_num + 1
                # If there are more cells to spread, then redirect macro execution
                # to the next cell.
                self.worksheet.add_cell(self._go_to(cells[cnt-1].x, cells[cnt-1].y + 1, cells[cnt]))
            else:
                break
    # ADD RETURN
    self.worksheet.add_next_cell(self._create_formula(-1, -1, "RETURN"))
def _create_argument_object(self, instruction, *args):
    '''
    Wraps `instruction` and its arguments in an Excel4FormulaArgument,
    reverting translation when translation is disabled in the config.
    '''
    name = Excel4InstructionName(instruction, self.config.translate)
    arg_obj = Excel4FormulaArgument(name, *args)
    if not self.config.translate:
        # Do not translate obfuscator objects
        arg_obj.revert_translation()
    return arg_obj
def _create_formula(self, x, y, instruction, *args):
    '''
    Builds an Excel4Formula for `instruction` at (x, y),
    reverting translation when translation is disabled in the config.
    '''
    name = Excel4InstructionName(instruction, self.config.translate)
    formula = Excel4Formula(x, y, name, *args)
    if not self.config.translate:
        # Do not translate obfuscator objects
        formula.revert_translation()
    return formula
def _create_value(self, x, y, value):
    '''
    Builds an Excel4Value holding `value` at (x, y),
    reverting address translation when translation is disabled in the config.
    '''
    value_obj = Excel4Value(x, y, value)
    if not self.config.translate:
        # Do not translate obfuscator objects
        value_obj.revert_address_translation()
    return value_obj
def _create_empty_formula(self, x, y):
    # An "empty formula" is just an Excel4Value with an empty string payload;
    # used as an addressable placeholder (e.g. a GOTO target).
    return self._create_value(x,y,"")
def _create_result_formula(self, x, y):
    '''
    Builds an Excel4Result at (x, y),
    reverting address translation when translation is disabled in the config.
    '''
    result_obj = Excel4Result(x, y)
    if not self.config.translate:
        # Do not translate obfuscator objects
        result_obj.revert_address_translation()
    return result_obj
def _go_to(self, x, y, formula):
    '''
    Builds a GOTO formula at (x, y) that jumps to `formula`,
    reverting translation when translation is disabled in the config.
    '''
    goto_name = Excel4InstructionName("GOTO", self.config.translate)
    goto_obj = Excel4GoToFormula(x, y, goto_name, formula)
    if not self.config.translate:
        # Do not translate obfuscator objects
        goto_obj.revert_translation()
    return goto_obj
def _char(self, s):
    '''
    Wraps `s` in a CHAR formula argument object.
    :param s: string or char code
    :return: CHAR formula argument
    '''
    return self._create_argument_object("CHAR", s)
def char(self, c):
    '''
    Encodes character `c` as a CHAR(code) formula call.
    :param c: character (or an already-numeric char code)
    :return: CHAR formula call
    '''
    code = c if is_number(c) else ord(c)
    return self._char(code)
def int(self, c):
    '''
    Encodes character `c` as a CHAR(INT(code)) formula call.
    (Note: intentionally shadows the builtin name within this class's namespace.)
    :param c: character (or an already-numeric char code)
    :return: CHAR(INT(...)) formula call
    '''
    code = c if is_number(c) else ord(c)
    return self._char(self._create_argument_object("INT", str(code)))
def sum(self, c):
    '''
    Encodes character `c` as CHAR(SUM(r, k)) where k is a random 1..1000
    value distinct from the char code and r is the absolute difference,
    so that r + k (or k + ... ) reconstructs the code at runtime.
    :param c: character (or an already-numeric char code)
    :return: CHAR(SUM(...)) formula call
    '''
    code = c if is_number(c) else ord(c)
    k = random.randint(1, 1000)
    while k == code:
        # Re-draw until the random key differs from the char code
        k = random.randint(1, 1000)
    r = code - k if k < code else k - code
    return self._char(self._create_argument_object("SUM", r, k))
def mid(self, c):
'''
Converts c character to MID(STR, RAND_INDEX,1) call
:param c: charcater
:return:
'''
if is_number(c):
| |
# The master_shortname_mol_dict
# Get it from the pickle file
print("Getting master_shortname_mol_dict from pickle file")
master_shortname_mol_dict = get_obj_from_pickle_file(master_shortname_mol_dict_pickle)
return master_mol_dict, master_shortname_mol_dict
##################################################################
#####################################################################
# I/O
#####################################################################
def get_full_length_mol_name(vars, master_mol_dict, master_shortname_mol_dict):
    """
    Get full-length mol_name and make sure that it is in the master_mol_dict
    Inputs:
    :param dict vars: dictionary of variable to use
    :param dict master_mol_dict: dictionary containing the information from every
        ligand from the AutoGrow run. keys are full-length name of the ligands.
    :param dict master_shortname_mol_dict: dictionary where keys are
        shorthand names and the items the full-length name.
    Returns:
    :returns: str mol_name: full-length name of ligand.
    """
    raw_name = vars["mol_name"]
    if raw_name in master_mol_dict:
        # original name is already the full-length name
        return raw_name
    if raw_name in master_shortname_mol_dict:
        return master_shortname_mol_dict[raw_name]
    # may be a gypsum variant with '__{}'.format(num) at the end
    # ie Gen_5_Mutant_46_684401 could be represented as Gen_5_Mutant_46_684401__1
    if "__" in raw_name:
        base_name = raw_name.split("__")[0]
        if base_name in master_shortname_mol_dict:
            # BUG FIX: the original fell through to a shorthand lookup of the
            # raw variant name after resolving the base name, raising a
            # KeyError on valid variant names. Return the resolved name here.
            return master_shortname_mol_dict[base_name]
        if base_name in master_mol_dict:
            return base_name
    # BUG FIX: the original closed .format() around the entire message, so
    # the '{}' placeholder was filled with the name plus the rest of the text.
    printout = ("mol_name provided not found in shorthand or "
                "full-length dictionaries. Please check that mol_name is in "
                "the AutoGrow run tested. \n"
                "Name provided is :\n\t{}"
                "\nName should look like is :"
                "\n\t (Gen_2_Mutant_7_97143)Gen_4_Mutant_7_802531"
                "\n\t\t or \n\t Gen_4_Mutant_7_802531".format(raw_name))
    print(printout)
    raise Exception(printout)
#
def run_purge_previous_pickled_files(vars):
    """
    This will delete previously created pickled files within the input_dir.
    The four files it will delete are:
        `$input_dir/comp_dict_pickle`, `$input_dir/master_mol_dict_pickle`,
        `$input_dir/master_shortname_mol_dict_pickle`,
        and `$input_dir/ranked_mol_dict_pickle`.
    These files save time when you are tracing the lineage of multiple
    compounds, however purging these files may be helpful for space saving
    or if it had been previously run with an invalid input variable.
    Following file deletion the program will terminate.
    inputs:
    :params vars inputs: dictionary of argparse parameters
    """
    print("\nDELETING PREVIOUSLY GENERATED PICKLED FILES.\n")
    input_dir = vars["input_dir"] + os.sep
    if os.path.exists(input_dir) is False:
        raise Exception("Input folder {} does not\
            exist.".format(input_dir))
    for file_name in ["comp_dict_pickle", "master_mol_dict_pickle",
                      "master_shortname_mol_dict_pickle", "ranked_mol_dict_pickle"]:
        file_path = input_dir + file_name
        if os.path.exists(file_path) is False:
            printout = "Could not delete {} file".format(file_name)
            printout = printout + " as it was not located at:\n\t {}\n".format(file_path)
            print(printout)
            continue
        try:
            os.remove(file_path)
        except OSError:
            # BUG FIX: narrowed from a bare except; only OS-level failures
            # (permissions, races) are expected here.
            printout = "WARNING: Could not delete {} file.\n".format(file_name)
            printout = printout + "\tPlease check file permissions of:"
            printout = printout + "\n\t\t {}\n".format(file_path)
            print(printout)
            continue
        # BUG FIX: the original printed "Deleted" twice per file (once inside
        # the try block and again after re-checking existence).
        print("Deleted: {}".format(file_path))
    print("Attempt to delete files completed.")
    sys.exit(0)
def process_inputs(inputs):
    """
    This will handle processing all parameters.
    inputs:
    :params dict inputs: dictionary of argparse parameters
    Returns:
    :returns: dict inputs: dictionary of argparse parameters
    """
    # handle input information
    inputs["input_dir"] = os.path.abspath(inputs["input_dir"]) + os.sep
    if os.path.exists(inputs["input_dir"]) is False:
        raise Exception("Input folder {} does not\
            exist.".format(inputs["input_dir"]))
    # get vars dict from last run
    inputs["vars_json"] = inputs["input_dir"] + "vars.json"
    if os.path.exists(inputs["vars_json"]) is False:
        raise Exception("Input folder {} does not contain the vars.json file \
            necessary to run script. Please make sure the vars.json is in the \
            folder.".format(inputs["input_dir"]))
    try:
        # loaded only to confirm the file is valid JSON written by AutoGrow
        with open(inputs["vars_json"], "r") as f:
            json.load(f)
    except (OSError, ValueError):
        # BUG FIX: narrowed from a bare except (json errors subclass ValueError)
        raise Exception("variable file would not import. It should be the \
            vars.json file written by AutoGrow in the output folder of the run.")
    # Handle output directory
    inputs["output_dir"] = os.path.abspath(inputs["output_dir"]) + os.sep
    if os.path.exists(inputs["output_dir"]) is False:
        try:
            os.mkdir(inputs["output_dir"])
            print("Made the output dir at: {}".format(inputs["output_dir"]))
        except OSError:
            # tolerated: existence is re-checked immediately below
            pass
    if os.path.exists(inputs["output_dir"]) is False:
        raise Exception("Output folder {} does not\
            exist.".format(inputs["output_dir"]))
    # handle source_compound_file .smi file
    if not isinstance(inputs["source_compound_file"], str) \
            or inputs["source_compound_file"] == "":
        raise Exception("--source_compound_file must be provided. It should be \
            the tab-delineated .smi file used to seed generation zero of the \
            AutoGrow run. This is a mandatory file.")
    inputs["source_compound_file"] = os.path.abspath(inputs["source_compound_file"])
    if os.path.exists(inputs["source_compound_file"]) is False:
        raise Exception("source_compound_file could not be found \
            at: {}".format(inputs["source_compound_file"]))
    if inputs["source_compound_file"].split(".")[-1] != "smi":
        raise Exception("--source_compound_file must be provided. It should be \
            the tab-delineated .smi file used to seed generation zero of the \
            AutoGrow run. This is a mandatory file.")
    # assign the destination for our pickle files (may already exist)
    inputs["ranked_mol_dict_pickle"] = inputs["input_dir"] + "ranked_mol_dict_pickle"
    inputs["comp_dict_pickle"] = inputs["input_dir"] + "comp_dict_pickle"
    inputs["master_mol_dict_pickle"] = inputs["input_dir"] + "master_mol_dict_pickle"
    inputs["master_shortname_mol_dict_pickle"] = inputs["input_dir"] \
        + "master_shortname_mol_dict_pickle"
    # handle singles image folder
    inputs["single_image_folder"] = inputs["output_dir"] + "single_image_folder" + os.sep
    if os.path.exists(inputs["single_image_folder"]) is False:
        os.mkdir(inputs["single_image_folder"])
    if "mol_name" not in inputs.keys():
        inputs["mol_name"] = None
    # BUG FIX: concatenating a None mol_name into the folder name raised a
    # TypeError; skip the ancestry folder for the no-ligand cases, which
    # run_everything exits early on anyway.
    if inputs["pre_run"] is False and inputs["mol_name"] not in [None, "None", ""]:
        inputs["ancestry_image_folder"] = inputs["output_dir"] \
            + "ancestry_" + inputs["mol_name"] + os.sep
        # Will wait to create this folder until its needed
    # Handle the cleanup variable purge_previous_pickled_files
    if "purge_previous_pickled_files" in inputs.keys():
        if inputs["purge_previous_pickled_files"] in [True, "true", "True"]:
            # We will delete files
            inputs["purge_previous_pickled_files"] = True
        elif inputs["purge_previous_pickled_files"] in [False, "false", "False"]:
            # We will not delete files
            inputs["purge_previous_pickled_files"] = False
        else:
            # Can not understand the input option
            raise Exception("Please check the --purge_previous_pickled_files setting provided." \
                " --purge_previous_pickled_files should be True or False.")
    else:
        inputs["purge_previous_pickled_files"] = False
    # If true delete files and terminate program
    if inputs["purge_previous_pickled_files"] is True:
        run_purge_previous_pickled_files(inputs)
    return inputs
#
def run_everything(vars):
    """
    Top-level driver: resolve the ligand name, trace its lineage, write the
    lineage .smi file, and generate the images.
    Inputs:
    :params dict vars: dictionary of argparse parameters
    """
    master_mol_dict, master_shortname_mol_dict = get_mol_dict(vars)
    if vars["pre_run"] is True or vars["mol_name"] in [None, "None", ""]:
        print("pre-run completed")
        sys.exit(0)
    mol_name = get_full_length_mol_name(vars, master_mol_dict, master_shortname_mol_dict)
    print("The full-length name of the ligand is: ", mol_name)
    print("")
    lineage_dict = get_all_ancestors(mol_name, master_shortname_mol_dict)
    # simplified dict holding only the ligands that appear in the lineage
    mol_dict = {
        lig_name: master_mol_dict[lig_name]
        for generation in lineage_dict
        for lig_name in lineage_dict[generation]
        if lig_name is not None
    }
    del master_mol_dict
    del master_shortname_mol_dict
    # Write all information of lineage to .smi file
    lineage_smi = vars["output_dir"] + \
        str(mol_name) + "_lineage.smi"
    lineage_list = []
    for generation in lineage_dict:
        lineage_list.extend(lineage_dict[generation])
    lines = []
    for lig_name in lineage_list:
        if lig_name is None:
            continue
        entry = copy.deepcopy(mol_dict[lig_name])
        del entry[-1]  # remove last item which is rdkit mol
        lines.append("\t".join(str(field) for field in entry) + "\n")
    with open(lineage_smi, 'w') as f:
        f.write("".join(lines))
    # generate images
    make_image_files(vars, lineage_dict, mol_dict)
######################################
######################################
######################################
PARSER = argparse.ArgumentParser()
# Get needed info
PARSER.add_argument(
"--output_dir",
"-o",
metavar="param.output_dir",
required=True,
help="Path to folder to output files. will be created if does not exist",
)
PARSER.add_argument(
"--input_dir",
"-i",
metavar="param.input_dir",
required=True,
help="Path to input folder containing the AutoGrow run. This should be the \
top folder which contains the vars.json file.",
)
PARSER.add_argument(
"--source_compound_file",
metavar="param.source_compound_file",
required=True,
default="",
help="This is the source .smi file used to seed generation zero of the \
AutoGrow run. This is an essential file.",
)
PARSER.add_argument(
"--pre_run",
metavar="param.pre_run",
default=False,
help="If True this will compile the necessary dictions/picklefiles and then \
terminate. These pickle files are stored in the input folder containing the \
vars.json file from the AutoGrow run.",
)
PARSER.add_argument(
"--mol_name",
metavar="param.mol_name",
default=None,
help="This is the name of the molecule whose lineage will be traced back. \
If not provided or None, the script will simply compile the necessary \
dictions/picklefiles and then terminate. These pickle files are stored \
in the input folder containing the vars.json file from the AutoGrow run.\
example mol_name: Gen_5_Cross_203131 or Gen_4_Mutant_7_802531 \
can also be provided as full-name ie: \
(Gen_2_Mutant_7_97143)Gen_4_Mutant_7_802531",
)
PARSER.add_argument(
"--purge_previous_pickled_files",
metavar="param.purge_previous_pickled_files",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True the script will delete the four pickled files previously \
created by this script: `comp_dict_pickle`, `master_mol_dict_pickle`, \
`master_shortname_mol_dict_pickle`, and `ranked_mol_dict_pickle`. \
These files save time when you are tracing the lineage of multiple \
compounds, however purging these files may be helpful for space saving \
or if it had been previously run with an invalid input variable. \
This does not affect the lineage files located in `output_dir`. \
Program will terminate once these files are deleted.",
)
ARGSDICT = vars(PARSER.parse_args())
# copying ARGSDICT so | |
# <gh_stars>0
import numpy as np
import os
from utils.io import IO
path = os.path
class Feature:
    """A contiguous run of clips in which one detected class occurs."""

    def __init__(self, feature_id, class_id, class_name,
                 start_clip_number, end_clip_number, event_id=None):
        """Create a new 'Feature' object.
        Args:
          feature_id: The position of the feature in the source video relative to
            other features
          class_id: Numeric id of the detected class
          class_name: Human-readable name of the detected class
          start_clip_number: The number of the first frame in which the feature
            occurred
          end_clip_number: The number of the last frame in which the feature
            occurred
          event_id: The id of the event to which this feature was assigned
        """
        self.feature_id = feature_id
        self.class_id = class_id
        self.class_name = class_name
        self.start_clip_number = start_clip_number
        self.end_clip_number = end_clip_number
        self.event_id = event_id
        # the number of consecutive frames over which the feature occurs
        self.length = self.end_clip_number - self.start_clip_number

    def __str__(self):
        fields = (
            ('feature_id', self.feature_id),
            ('event_id', self.event_id),
            ('class_id', self.class_id),
            ('class_name', self.class_name),
            ('start_clip_number', self.start_clip_number),
            ('end_clip_number', self.end_clip_number),
            ('length', self.length),
        )
        return '\n'.join('\t{}: {}'.format(name, value)
                         for name, value in fields)
class ActivationEvent:
    """A crossing-activation event assembled from consecutive Features.

    All violation flags are None until find_violations() is called with the
    per-clip classification matrix.
    """

    # every per-event flag; set to None at construction and filled in by
    # find_violations()
    _FLAG_NAMES = (
        "contains_nw_veh_warning_type_1",
        "contains_nw_veh_warning_type_2",
        "contains_nw_veh_warning_type_3",
        "contains_nw_veh_warning_type_4",
        "contains_se_veh_warning_type_1",
        "contains_se_veh_warning_type_2",
        "contains_se_veh_warning_type_3",
        "contains_se_veh_warning_type_4",
        "contains_north_ped_warning_type_1",
        "contains_north_ped_warning_type_2",
        "contains_north_ped_warning_type_3",
        "contains_north_ped_warning_type_4",
        "contains_south_ped_warning_type_1",
        "contains_south_ped_warning_type_2",
        "contains_south_ped_warning_type_3",
        "contains_south_ped_warning_type_4",
        "contains_ped_arnd_se_ped_gate",
        "contains_ped_arnd_ne_ped_gate",
        "contains_ped_arnd_ne_veh_gate",
        "contains_ped_arnd_sw_ped_gate",
        "contains_ped_arnd_sw_veh_gate",
        "contains_ped_arnd_nw_ped_gate",
        "contains_ped_over_se_ped_gate",
        "contains_ped_over_ne_ped_gate",
        "contains_ped_over_ne_veh_gate",
        "contains_ped_over_sw_ped_gate",
        "contains_ped_over_sw_veh_gate",
        "contains_ped_over_nw_ped_gate",
        "contains_ped_undr_se_ped_gate",
        "contains_ped_undr_ne_ped_gate",
        "contains_ped_undr_ne_veh_gate",
        "contains_ped_undr_sw_ped_gate",
        "contains_ped_undr_sw_veh_gate",
        "contains_ped_undr_nw_ped_gate",
        "train_is_present",
    )

    def __init__(self, event_id, target_feature_list,
                 preceding_feature=None, following_feature=None):
        """Create a new 'Event' object.
        Args:
          event_id: int. The position of the event in the source video relative to
            other events.
          target_feature_list: Feature List. A list of features the event of
            interest could contain.
          preceding_feature: Feature. An auxiliary feature strictly different in
            type from the target feature that should be included in the event if it
            occurs just before the target feature in the source video.
          following_feature: Feature. An auxiliary feature strictly different in
            type from the target feature that should be included in the event if it
            occurs just after the target feature in the source video.
        """
        self.event_id = event_id
        self.target_feature_list = target_feature_list
        # claim each target feature for this event
        for target_feature in self.target_feature_list:
            target_feature.event_id = self.event_id
        self.start_clip_number = self.target_feature_list[0].start_clip_number
        self.end_clip_number = self.target_feature_list[-1].end_clip_number
        # auxiliary features, when present, extend the event's clip range
        if preceding_feature:
            self.start_clip_number = preceding_feature.start_clip_number
        if following_feature:
            self.end_clip_number = following_feature.end_clip_number
        # BUG FIX: length was computed twice in the original; compute it once,
        # after the range has been extended by the auxiliary features.
        self.length = self.end_clip_number - self.start_clip_number
        self._preceding_feature = preceding_feature
        self._following_feature = following_feature
        for flag_name in self._FLAG_NAMES:
            setattr(self, flag_name, None)

    @staticmethod
    def _gate_phase_flags(classifications, violation_columns):
        """Return the four warning flags for one crossing approach.

        The flags correspond, in order, to a violation occurring while:
        lights flashing before gate descent (col 1), gates descending (col 4),
        gates fully horizontal (col 2), and gates ascending (col 3).
        """
        violation_state = np.any(classifications[:, violation_columns], axis=1)
        return tuple(
            np.any(np.logical_and(classifications[:, gate_column],
                                  violation_state))
            for gate_column in (1, 4, 2, 3))

    def find_violations(self, classifications):
        """Populate all violation flags from the per-clip classifications.

        Args:
          classifications: 2-D array with one row per clip and one column per
            classifier output.
        """
        # vehicle violations on the northwest and southeast approaches
        (self.contains_nw_veh_warning_type_1,
         self.contains_nw_veh_warning_type_2,
         self.contains_nw_veh_warning_type_3,
         self.contains_nw_veh_warning_type_4) = self._gate_phase_flags(
             classifications, [34, 40, 43, 49])
        (self.contains_se_veh_warning_type_1,
         self.contains_se_veh_warning_type_2,
         self.contains_se_veh_warning_type_3,
         self.contains_se_veh_warning_type_4) = self._gate_phase_flags(
             classifications, [31, 37, 46, 52])
        # pedestrian violations on the north and south sides
        (self.contains_north_ped_warning_type_1,
         self.contains_north_ped_warning_type_2,
         self.contains_north_ped_warning_type_3,
         self.contains_north_ped_warning_type_4) = self._gate_phase_flags(
             classifications, [69, 79, 80, 83, 85, 86, 89, 91, 92, 95])
        (self.contains_south_ped_warning_type_1,
         self.contains_south_ped_warning_type_2,
         self.contains_south_ped_warning_type_3,
         self.contains_south_ped_warning_type_4) = self._gate_phase_flags(
             classifications, [68, 78, 81, 82, 84, 87, 88, 90, 93, 94])
        # pedestrians going around/over/under each of the six gates occupy
        # the consecutive columns 78-95, in the order listed below
        column = 78
        for mode in ("arnd", "over", "undr"):
            for gate in ("se_ped", "ne_ped", "ne_veh",
                         "sw_ped", "sw_veh", "nw_ped"):
                setattr(self, "contains_ped_{}_{}_gate".format(mode, gate),
                        np.any(classifications[:, column]))
                column += 1
        # train presence is indicated by any of columns 6-31
        self.train_is_present = np.any(classifications[:, 6:32])

    @property
    def preceding_feature(self):
        return self._preceding_feature

    @preceding_feature.setter
    def preceding_feature(self, preceding_feature):
        self._preceding_feature = preceding_feature

    @property
    def following_feature(self):
        return self._following_feature

    @following_feature.setter
    def following_feature(self, following_feature):
        self._following_feature = following_feature
        # if this event's following feature is being reassigned to a later
        # event, the 'following_feature' argument will be None
        if self.following_feature:
            self.end_clip_number = self.following_feature.end_clip_number
        else:
            self.end_clip_number = self.target_feature_list[-1].end_clip_number

    def __str__(self):
        print_string = 'SHRP2 NDS Video Event\n\n'
        if self.preceding_feature:
            print_string += 'Preceding Feature:\n{}\n\n'.format(
                self.preceding_feature)
        print_string += 'Target Features:\n{}\n\n'.format(self.target_feature_list)
        if self.following_feature:
            print_string += 'Following Feature:\n{}\n\n'.format(
                self.following_feature)
        return print_string
class StoppedOnCrossingIncursionEvent:
def __init__(self, event_id, target_feature_list,
preceding_feature=None, following_feature=None):
"""Create a new 'Event' object.
Args:
event_id: int. The position of the event in the source video relative to
other events.
target_feature_list: Feature List. A list of features the event of
interest could contain.
preceding_feature: Feature. An auxiliary feature strictly different in
type from the target feature that should be included in the event if it
occurs just before the target feature in the source video.
following_feature: Feature. An auxiliary feature strictly different in
type from the target feature that should be included in the event if it
occurs just after the target feature in the source video.
"""
self.event_id = event_id
self.target_feature_list = target_feature_list
for target_feature in self.target_feature_list:
target_feature.event_id = self.event_id
self.start_clip_number = self.target_feature_list[0].start_clip_number
self.end_clip_number = self.target_feature_list[-1].end_clip_number
self.length = self.end_clip_number - self.start_clip_number
self._preceding_feature = preceding_feature
self._following_feature = following_feature
# self.contains_stopped_on_crossing_violation = None
self.contains_veh_std_on_se_crsg = None
self.contains_veh_std_on_ne_crsg = None
self.contains_veh_std_on_sw_crsg = None
self.contains_veh_std_on_nw_crsg = None
self.train_is_present = None
def find_violations(self, classifications):
# find vehicle violations
self.contains_veh_std_on_se_crsg = np.any(classifications[:, 55])
self.contains_veh_std_on_ne_crsg = np.any(classifications[:, 58])
self.contains_veh_std_on_sw_crsg = np.any(classifications[:, 61])
self.contains_veh_std_on_nw_crsg = np.any(classifications[:, 64])
self.train_is_present = np.any(
classifications[:,
[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]])
@property
def preceding_feature(self):
return self._preceding_feature
@preceding_feature.setter
def preceding_feature(self, preceding_feature):
self._preceding_feature = preceding_feature
# self.start_timestamp = self.preceding_feature.start_timestamp
# self.start_clip_number = self.preceding_feature.start_clip_number
@property
def following_feature(self):
return self._following_feature
@following_feature.setter
def following_feature(self, following_feature):
self._following_feature = following_feature
# if this event's following feature is being reassigned to a later event,
# the 'following_feature' argument will be None
if self.following_feature:
self.end_timestamp = self.following_feature.end_timestamp
self.end_clip_number = self.following_feature.end_clip_number
else:
self.end_timestamp | |
ymax > self._state_dims[1]:
# ymax = self._state_dims[1]
# screen_ymax = screen_ymin + len(np.arange(ymin, ymax, self.yscale))
# if zmax > self._state_dims[2]:
# zmax = self._state_dims[2]
# screen_zmax = screen_zmin + len(np.arange(zmin, zmax, self.zscale))
#
# # take image, mask it w agent trajectory
# agent_trajectory = self.trajectory_to_branch()
# agent_trajectory *= -1 # agent frames are negative
# # paste agent trajectory ontop of original state, but only when vals are not 0
# agent_mask = agent_trajectory.astype(bool)
# # print("agent traj shape", np.shape(agent_trajectory), np.shape(agent_mask))
# if agent_mask.any(): # agent trajectory not empty
# np.copyto(self.swc_to_tiff, agent_trajectory, casting='no', where=agent_mask)
# assert self.swc_to_tiff is not None
#
# # crop image data to update what network sees
# # image coordinate system becomes screen coordinates
# # scale can be thought of as a stride
# # TODO: check if we need to keep "stride" from upstream
# observation[screen_xmin:screen_xmax, screen_ymin:screen_ymax, screen_zmin:screen_zmax] = self.swc_to_tiff[
# xmin:xmax,
# ymin:ymax,
# zmin:zmax]
#
# # update _observation_bounds limits from input image coordinates
# # this is what the network sees
# self._observation_bounds = ObservationBounds(xmin, xmax,
# ymin, ymax,
# zmin, zmax)
#
# return observation
# def connect_nodes_to_img(self):
# """take location history, generate connected branches using Vaa3d plugin
# FIXME this function is horribly inefficient
# """
# locations = self._agent_nodes[:self.cnt] # grab everything up until the current ts
# # print("iter ", self.cnt, "locations: ", locations)
# # print("og state shape ", np.shape(self.original_state))
# # print("self obs dims ", self.observation_dims)
# # if the agent hasn't drawn any nodes, then the branch is empty. skip pipeline, return empty arr.
# if not locations.any(): # if all zeros, evals to False
# output_npy = np.zeros_like(self.original_state)
# else:
# fname = 'agent_trajectory' + str(np.random.randint(low=0, high=int(99e10)))
#
# # try:
# with tempfile.TemporaryDirectory() as tmpdir:
#
# output_swc = locations_to_swc(locations, fname, output_dir=tmpdir, overwrite=False)
# # TODO: be explicit about bounds to swc_to_tiff
# output_tiff_path = swc_to_TIFF(fname, output_swc, output_dir=tmpdir, overwrite=False)
# with THREAD_LOCKER:
# output_npy_path = TIFF_to_npy(fname, output_tiff_path, output_dir=tmpdir,
# overwrite=False)
# output_npy = np.load(output_npy_path).astype(float)
# # except IOError as e:
# # print('IOError', e)
# # finally:
# # print("agent trajectory shape ", np.shape(output_npy))
# np.clip(output_npy, 0, 1, out=output_npy)
#
# return output_npy
# def crop_brain(self, xmin, xmax, ymin, ymax, zmin, zmax):
# return self.state[xmin:xmax, ymin:ymax, zmin:zmax]
def distance_to_nearest_unexplored_node(self, location=None):
# allow to chose reference location
if location is None:
location = self._location
unvisisted_nodes = np.transpose(np.where(self._state > 0))
if unvisisted_nodes.size == 0:
raise Exception
else:
distance_to_nearest = distance.cdist([location], unvisisted_nodes).min()
self.curr_distance = distance_to_nearest
return distance_to_nearest
def _calc_reward(self, go_out, backtrack, node_found):
""" Calculate the new reward based on the increase in IoU
"""
if go_out:
reward = -2
return reward
if backtrack:
reward = -1
return reward
if node_found:
reward = 2
return reward
if self.cnt == 0:
previous_distance = np.inf
else:
previous_distance = self._distances[self.cnt - 1]
distance_to_nearest = self.distance_to_nearest_unexplored_node()
# print("distance: ", distance_to_nearest)
if distance_to_nearest < previous_distance:
reward = 1
else: # going farther away
reward = -1
return reward
# # overrides everything else
# # if terminal_found:
# # reward = 500
# # return reward
# if go_out:
# reward = -20
# return reward
# if backtrack:
# reward = -5
# return reward
#
#
# # TODO, double check if indexes are correct
# if self.cnt == 0:
# previous_IOU = 0.
# else:
# previous_IOU = self._IOU_history[self.cnt - 1]
# IOU_difference = self.curr_IOU - previous_IOU
# # print(self.cnt, self._history_length)
# # print("curr IOU = ", self.curr_IOU, "prev IOU = ", self._IOU_history[self.cnt - 1], "diff = ", IOU_difference,
# # "loc ", self._location)
# assert isinstance(IOU_difference, float)
#
# if IOU_difference > 0:
# reward = 200
# else: # didn't go out, backtrack, or improve score
# reward = -1
#
# if terminal_found:
# reward = 1000
#
#
#
# return reward
def _is_in_bounds(self, coords):
assert len(coords) == 3
x, y, z = coords
bounds = self._observation_bounds
is_in_bounds = ((bounds.xmin <= x <= bounds.xmax and
bounds.ymin <= y <= bounds.ymax and
bounds.zmin <= z <= bounds.zmax))
# if not is_in_bounds:
# print("out of bounds :", coords)
return is_in_bounds
# @property
# def _oscillate(self):
# """ Return True if the agent is stuck and oscillating
# """
# # TODO reimplement
# # TODO: erase last few frames if oscillation is detected
# counter = Counter(self._agent_nodes)
# freq = counter.most_common()
#
# # TODO: wtF?
# if freq[0][0] == (0, 0, 0):
# if (freq[1][1] > 3):
# return True
# else:
# return False
# elif (freq[0][1] > 3):
# return True
#
# def get_action_meanings(self):
# """ return array of integers for actions"""
# ACTION_MEANING = {
# 1: "UP", # MOVE Z+
# 2: "FORWARD", # MOVE Y+
# 3: "RIGHT", # MOVE X+
# 4: "LEFT", # MOVE X-
# 5: "BACKWARD", # MOVE Y-
# 6: "DOWN", # MOVE Z-
# }
# return [ACTION_MEANING[i] for i in self.actions]
# @property
# def getScreenDims(self):
# """
# return screen dimensions
# """
# return (self.width, self.height, self.depth)
#
# def lives(self):
# return None
def reset_stat(self):
""" Reset all statistics counter"""
self.stats = defaultdict(list)
# stat counter to store current score or accumlated reward
self.rewards = StatCounter()
self.episode_duration = StatCounter()
self.num_games = StatCounter()
self.num_success = StatCounter()
self.num_backtracked = StatCounter()
self.num_go_out = StatCounter()
self.num_stuck = StatCounter()
# self.best_q_vals = StatCounter()
self.num_act0 = StatCounter()
self.num_act1 = StatCounter()
self.num_act2 = StatCounter()
self.num_act3 = StatCounter()
self.num_act4 = StatCounter()
self.num_act5 = StatCounter()
def display(self):
"""this is called at every step"""
# current_point = self._location
# img = cv2.cvtColor(plane, cv2.COLOR_GRAY2RGB) # congvert to rgb
# rescale image
# INTER_NEAREST, INTER_LINEAR, INTER_AREA, INTER_CUBIC, INTER_LANCZOS4
# scale_x = 1
# scale_y = 1
# print("nodes ", self._agent_nodes)
# print("ious", self._IOU_history)
# print("reward history ", np.unique(self.reward_history))
# print("IOU history ", np.unique(self._IOU_history))
plotter = Viewer(self.human_locations, self._agent_nodes, self.reward_history,
filepath=self.filename, state_dimensions=self.original_state.shape)
#
# #
# # from viewer import SimpleImageViewer
# # self.viewer = SimpleImageViewer(self.swc_to_tiff,
# # scale_x=1,
# # scale_y=1,
# # filepath=self.filename)
# self.gif_buffer = []
#
#
# # render and wait (viz) time between frames
# self.viewer.render()
# # time.sleep(self.viz)
# # save gif
if self.saveGif:
raise NotImplementedError
# image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
# data = image_data.get_data('RGB', image_data.width * 3)
# arr = np.array(bytearray(data)).astype('uint8')
# arr = np.flip(np.reshape(arr, (image_data.height, image_data.width, -1)), 0)
# im = Image.fromarray(arr)
# self.gif_buffer.append(im)
#
# if not self.terminal:
# gifname = self.filename.split('.')[0] + '.gif'
# self.viewer.saveGif(gifname, arr=self.gif_buffer,
# duration=self.viz)
if self.saveVideo:
dirname = 'tmp_video'
# if self.cnt <= 1:
# if os.path.isdir(dirname):
# logger.warn("""Log directory {} exists! Use 'd' to delete it. """.format(dirname))
# act = input("select action: d (delete) / q (quit): ").lower().strip()
# if act == 'd':
# shutil.rmtree(dirname, ignore_errors=True)
# else:
# raise OSError("Directory {} exits!".format(dirname))
# os.mkdir(dirname)
vid_fpath = str(np.sum(self.reward_history)) + self.filename + '.mp4'
vid_fpath = dirname + '/' + vid_fpath + '.mp4'
with THREAD_LOCKER:
plotter.save_vid(vid_fpath)
# plotter.show_agent()
if self.viz: # show progress
# plotter.show()
# actually, let's just save the files for later
output_dir = os.path.abspath("saved_trajectories/")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# outfile_fpath = os.path.join(output_dir, input_fname + ".npy")
#
# # don't overwrite
# if not os.path.isfile(outfile_fpath) or overwrite:
# desired_len = 16
# img_array = tiff2array.imread(input_fpath)
# # make all arrays the same shape
# # format: ((top, bottom), (left, right))
# shp = img_array.shape
# # print(shp, flush=True)
# if shp != (desired_len, desired_len, desired_len):
# try:
# img_array = np.pad(img_array, (
# (0, desired_len - shp[0]), (0, desired_len - shp[1]), (0, desired_len - shp[2])),
# 'constant')
# except ValueError:
# raise
# # print(shp, flush=True) # don't wait for all threads to finish before printing
#
np.savez(output_dir + self.filename, locations=self._agent_nodes, original_state=self.original_state,
reward_history=self.reward_history)
# return outfile_fpath
# class DiscreteActionSpace(object):
#
# def __init__(self, num):
# super(DiscreteActionSpace, self).__init__()
# self.num = num
# self.rng = get_rng(self)
#
# def sample(self):
# return self.rng.randint(self.num)
#
# def num_actions(self):
# return | |
<filename>openapi_client/api/default_api.py<gh_stars>0
# coding: utf-8
"""
JDX reference application API
This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501
The version of the OpenAPI document: 0.0.17
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import (
ApiTypeError,
ApiValueError
)
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def framework_recommendations_post(self, **kwargs): # noqa: E501
"""Get framework recommendations based on the uploaded job descripton and context. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.framework_recommendations_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Get framework-recommendations for a given Pipeline ID.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: FrameworkRecommendationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.framework_recommendations_post_with_http_info(**kwargs) # noqa: E501
def framework_recommendations_post_with_http_info(self, **kwargs): # noqa: E501
"""Get framework recommendations based on the uploaded job descripton and context. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.framework_recommendations_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Get framework-recommendations for a given Pipeline ID.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(FrameworkRecommendationResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method framework_recommendations_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in local_var_params:
body_params = local_var_params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/framework-recommendations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FrameworkRecommendationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def framework_selections_post(self, **kwargs): # noqa: E501
"""The user indicates what frameworks they selected # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.framework_selections_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param FrameworkSelectionRequest framework_selection_request: framework selections
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Response
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.framework_selections_post_with_http_info(**kwargs) # noqa: E501
def framework_selections_post_with_http_info(self, **kwargs): # noqa: E501
"""The user indicates what frameworks they selected # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.framework_selections_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param FrameworkSelectionRequest framework_selection_request: framework selections
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Response, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['framework_selection_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method framework_selections_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'framework_selection_request' in local_var_params:
body_params = local_var_params['framework_selection_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/framework-selections', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Response', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def generate_job_schema_plus_post(self, **kwargs): # noqa: E501
"""Generate JobSchema+ # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.generate_job_schema_plus_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Generate JobSchema+ from a given pipeline_id
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GenerateJobSchemaPlusResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.generate_job_schema_plus_post_with_http_info(**kwargs) # noqa: E501
def generate_job_schema_plus_post_with_http_info(self, **kwargs): # noqa: E501
"""Generate JobSchema+ # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.generate_job_schema_plus_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Generate JobSchema+ from a given pipeline_id
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GenerateJobSchemaPlusResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method generate_job_schema_plus_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in local_var_params:
body_params = local_var_params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/generate-job-schema-plus', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GenerateJobSchemaPlusResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_score_post(self, **kwargs): # noqa: E501
"""Provides a scored based on how much metadata you provide and the quality of that data. # noqa: E501
This | |
eforearm.tail = pforearm_driven.head
else:
if arm.helper_bones['arms']['ulna'] and arm.helper_bones['arms']['ulna'][index]:
prefix, bone = bone_convert(arm.helper_bones['arms']['ulna'][index])
pulna = armature.pose.bones[prefix + bone]
eforearm.tail = pulna.head
if arm.symmetrical_bones['arms']['hand'] and arm.symmetrical_bones['arms']['hand'][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['arms']['hand'][index])
phand = armature.pose.bones[prefix + bone]
ehand = armature.data.edit_bones[prefix + bone]
if arm.helper_bones['arms']['ulna'] and arm.helper_bones['viewmodel']['forearm_driven'] and arm.helper_bones['arms']['ulna'][index] and arm.helper_bones['viewmodel']['forearm_driven'][index]:
prefix, bone = bone_convert(arm.helper_bones['arms']['ulna'][index])
eulna = armature.data.edit_bones[prefix + bone]
eulna.tail = phand.head
prefix, bone = bone_convert(arm.helper_bones['viewmodel']['forearm_driven'][index])
eforearm_driven = armature.data.edit_bones[prefix + bone]
eforearm_driven.tail = eulna.head
#If both ulna and wrist are present
elif arm.helper_bones['arms']['ulna'] and arm.helper_bones['arms']['wrist'] and arm.helper_bones['arms']['ulna'][index] and arm.helper_bones['arms']['wrist'][index]:
prefix, bone = bone_convert(arm.helper_bones['arms']['ulna'][index])
eulna = armature.data.edit_bones[prefix + bone]
eulna.tail = phand.head
eulna.length = eulna.length/1.6
prefix, bone = bone_convert(arm.helper_bones['arms']['wrist'][index])
ewrist = armature.data.edit_bones[prefix + bone]
ewrist.head = eulna.tail
ewrist.tail = phand.head
#Else if only ulna is present
elif arm.helper_bones['arms']['ulna'] and arm.helper_bones['arms']['ulna'][index]:
prefix, bone = bone_convert(arm.helper_bones['arms']['ulna'][index])
eulna = armature.data.edit_bones[prefix + bone]
eulna.tail = phand.head
#Else if only wrist is present
elif arm.helper_bones['arms']['wrist'] and arm.helper_bones['arms']['wrist'][index]:
prefix, bone = bone_convert(arm.helper_bones['arms']['wrist'][index])
ewrist = armature.data.edit_bones[prefix + bone]
eforearm.length = eforearm.length/1.3
ewrist.head = eforearm.tail
ewrist.tail = phand.head
eforearm.tail = ewrist.head
ewrist.use_connect = True
else: #If neither are present
eforearm.tail = phand.head
ehand.use_connect = True
##Quadricep##
for index, bone in enumerate(arm.symmetrical_bones['legs']['thigh']):
if bone:
prefix, bone = bone_convert(bone)
ethigh = armature.data.edit_bones[prefix + bone]
#bone2 present to avoid problems with the last condition
#Force thigh to use quad's position if available
if arm.helper_bones['legs']['quadricep'] and arm.helper_bones['legs']['quadricep'][index]:
prefix2, bone2 = bone_convert(arm.helper_bones['legs']['quadricep'][index])
pquadricep = armature.pose.bones[prefix2 + bone2]
equadricep = armature.data.edit_bones[prefix2 + bone2]
ethigh.tail = pquadricep.head
if arm.symmetrical_bones['legs']['calf'] and arm.symmetrical_bones['legs']['calf'][index]:
prefix2, bone2 = bone_convert(arm.symmetrical_bones['legs']['calf'][index])
pcalf = armature.pose.bones[prefix2 + bone2]
equadricep.tail = pcalf.head
#Gluteus (Only for Zoey)
if arm.helper_bones['others'].get('gluteus'):
if arm.helper_bones['others']['gluteus'] and arm.helper_bones['others']['gluteus'][index]:
prefix2, bone2 = bone_convert(arm.helper_bones['others']['gluteus'][index])
pgluteus = armature.pose.bones[prefix2 + bone2]
pgluteus.rotation_quaternion[3] = -1
pgluteus.scale.xyz = 25,25,25
bpy.ops.object.mode_set(mode='POSE')
armature.data.bones[prefix2 + bone2].select = True
bpy.ops.pose.armature_apply(selected=True)
bpy.ops.pose.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='EDIT')
ethigh = armature.data.edit_bones[prefix + bone]
egluteus = armature.data.edit_bones[prefix2 + bone2]
ethigh.head = egluteus.tail
#Shoulder1 (Only for Louis)
if arm.helper_bones['arms'].get('shoulder1'):
if arm.symmetrical_bones['arms']['clavicle']:
prefix, bone = bone_convert(arm.symmetrical_bones['arms']['clavicle'][0])
eclavicle = armature.data.edit_bones[prefix + bone]
if arm.helper_bones['arms']['shoulder']:
prefix, bone = bone_convert(arm.helper_bones['arms']['shoulder'][0])
eshoulder = armature.data.edit_bones[prefix + bone]
eclavicle.tail = eshoulder.head
elif arm.symmetrical_bones['arms']['upperarm']:
prefix, bone = bone_convert(arm.symmetrical_bones['arms']['upperarm'][0])
eupperarm = armature.data.edit_bones[prefix + bone]
eclavicle.tail = eupperarm.head
##Thumbroot## (Only for viewmodels)
for index, bone in enumerate(arm.symmetrical_bones['arms']['hand']):
if bone:
prefix, bone = bone_convert(bone)
phand = armature.pose.bones[prefix + bone]
ehand = armature.data.edit_bones[prefix + bone]
if arm.helper_bones['viewmodel']['thumbroot'] and arm.helper_bones['viewmodel']['thumbroot'][index]:
prefix, bone = bone_convert(arm.helper_bones['viewmodel']['thumbroot'][index])
ethumbroot = armature.data.edit_bones[prefix + bone]
ethumbroot.head = phand.head
if arm.symmetrical_bones['fingers']['finger0'] and arm.symmetrical_bones['fingers']['finger0'][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['fingers']['finger0'][index])
pfinger0 = armature.pose.bones[prefix + bone]
ethumbroot.tail = pfinger0.head
if vatinfo.sbox:
for index, bone in enumerate(arm.symmetrical_bones['legs']['thigh']):
if bone:
prefix, bone = bone_convert(bone)
ethigh = armature.data.edit_bones[prefix + bone]
if arm.helper_bones['legs']['quadricep'] and arm.helper_bones['legs']['quadricep'][index]:
prefix, bone = bone_convert(arm.helper_bones['legs']['quadricep'][index])
equadricep = armature.data.edit_bones[prefix + bone]
equadricep.head = ethigh.head
equadricep.length = equadricep.length / 3
ethigh.head = equadricep.tail
if arm.symmetrical_bones['legs']['calf'] and arm.symmetrical_bones['legs']['calf'][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['legs']['calf'][index])
ecalf = armature.data.edit_bones[prefix + bone]
ethigh.tail = ecalf.head
for index, bone in enumerate(arm.symmetrical_bones['legs']['foot']):
if bone:
prefix, bone = bone_convert(bone)
efoot = armature.data.edit_bones[prefix + bone]
if arm.helper_bones['legs'].get('lowerleg') and arm.helper_bones['legs']['lowerleg'][index]:
prefix, bone = bone_convert(arm.helper_bones['legs']['lowerleg'][index])
elowerleg = armature.data.edit_bones[prefix + bone]
elowerleg.tail = efoot.head
for index, bone in enumerate(arm.symmetrical_bones['arms']['hand']):
if bone:
prefix, bone = bone_convert(bone)
ehand = armature.data.edit_bones[prefix + bone]
if arm.helper_bones['arms']['wrist'] and arm.helper_bones['arms']['wrist'][index]:
prefix, bone = bone_convert(arm.helper_bones['arms']['wrist'][index])
ewrist = armature.data.edit_bones[prefix + bone]
ewrist.length = ewrist.length*1.35
ehand.tail = ewrist.tail
ewrist.length = ewrist.length/1.5
ehand.head = ewrist.tail
if vatinfo.titanfall:
#Changes pelvis position to avoid deletion
if arm.central_bones['pelvis'] and arm.central_bones['spine1']:
prefix, bone = bone_convert(arm.central_bones['pelvis'][0])
epelvis = armature.data.edit_bones[prefix + bone]
prefix, bone = bone_convert(arm.central_bones['spine1'][0])
espine1 = armature.data.edit_bones[prefix + bone]
epelvis.tail = espine1.head
epelvis.length = epelvis.length/3
#Aligns calf to the thigh
for index, bone in enumerate(arm.symmetrical_bones['legs']['calf']):
if bone:
prefix, bone = bone_convert(bone)
ecalf = armature.data.edit_bones[prefix + bone]
if arm.symmetrical_bones['legs'].get('thighlow') and arm.symmetrical_bones['legs']['thighlow'][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['legs']['thighlow'][index])
ethighlow = armature.data.edit_bones[prefix + bone]
ecalf.head = ethighlow.tail
ecalf.use_connect = True
elif arm.symmetrical_bones['legs']['thigh'] and arm.symmetrical_bones['legs']['thigh'][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['legs']['thigh'][index])
ethigh = armature.data.edit_bones[prefix + bone]
ecalf.head = ethigh.tail
#Removes head bone since it serves no purpose and neck2 serves its purpose anyways, and repositions both neck bones to be more accurate to where they would really be
if arm.central_bones['head'] and arm.central_bones['neck'] and arm.central_bones.get('neck2'):
prefix, bone = bone_convert(arm.central_bones['head'][0])
ehead = armature.data.edit_bones[prefix + bone]
prefix, bone = bone_convert(arm.central_bones['neck'][0])
eneck = armature.data.edit_bones[prefix + bone]
prefix, bone = bone_convert(arm.central_bones['neck2'][0])
eneck2 = armature.data.edit_bones[prefix + bone]
eneck.tail = eneck2.head
eneck2.tail = ehead.tail
eneck2.parent = eneck
eneck2.use_connect = True
armature.data.edit_bones.remove(ehead)
#Corrects central bones roll values to 0
if type == 'anim':
for container, bone in arm.central_bones.items():
for bone in bone:
if bone:
if vatinfo.titanfall and bone.title().count('Head'):
continue
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
ebone.roll = 0
#Finger tips tweak
for container, bone in arm.symmetrical_bones['fingers'].items():
if container == 'finger0' or container == 'finger1' or container == 'finger2' or container == 'finger3' or container == 'finger4':
for index, bone in enumerate(bone):
if bone:
prefix, bone = bone_convert(bone)
tip = container[0:7] + '2'
middle = container[0:7] + '1'
if arm.symmetrical_bones['fingers'][tip] and arm.symmetrical_bones['fingers'][tip][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['fingers'][middle][index])
ebone = armature.data.edit_bones[prefix + bone]
length = ebone.length
ebone.length = length*2
prefix, bone = bone_convert(arm.symmetrical_bones['fingers'][tip][index])
armature.data.edit_bones[prefix + bone].tail.xyz = ebone.tail.x, ebone.tail.y, ebone.tail.z
ebone.length = length
elif arm.symmetrical_bones['fingers'][middle] and arm.symmetrical_bones['fingers'][middle][index]:
prefix, bone = bone_convert(arm.symmetrical_bones['fingers'][container][index])
ebone = armature.data.edit_bones[prefix + bone]
length = ebone.length
ebone.length = length*2
prefix, bone = bone_convert(arm.symmetrical_bones['fingers'][middle][index])
armature.data.edit_bones[prefix + bone].tail = ebone.tail
ebone.length = length
#If no head
if not arm.central_bones['head']:
ebone = None
ebone2 = None
if arm.central_bones['spine4'] and arm.central_bones['spine2']:
prefix, bone = bone_convert(arm.central_bones['spine2'][0])
ebone = armature.data.edit_bones[prefix + bone]
prefix, bone = bone_convert(arm.central_bones['spine4'][0])
ebone2 = armature.data.edit_bones[prefix + bone]
elif arm.central_bones['spine3'] and arm.central_bones['neck']:
prefix, bone = bone_convert(arm.central_bones['spine3'][0])
ebone = armature.data.edit_bones[prefix + bone]
prefix, bone = bone_convert(arm.central_bones['neck'][0])
ebone2 = armature.data.edit_bones[prefix + bone]
elif arm.central_bones['spine3'] and arm.central_bones['spine2']:
prefix, bone = bone_convert(arm.central_bones['spine2'][0])
ebone = armature.data.edit_bones[prefix + bone]
prefix, bone = bone_convert(arm.central_bones['spine3'][0])
ebone2 = armature.data.edit_bones[prefix + bone]
if ebone and ebone2:
length = ebone.length
ebone.length = ebone.length*1.75
ebone2.tail = ebone.tail
ebone.length = length
ebone2.tail.y = ebone2.head.y
else:
#Gmod default viewmodels only have spine4, this aligns it
if arm.central_bones['spine4']:
prefix, bone = bone_convert(arm.central_bones['spine4'][0])
ebone = armature.data.edit_bones[prefix + bone]
ebone.tail.x = ebone.head.x
#Rotates bones with no children to be more readable while keeping their isolated form intact
if arm.chainless_bones:
for bone in arm.chainless_bones:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
if type == 'anim':
if ebone.children[0].name.endswith('.isolated'):
ebone2 = armature.data.edit_bones[ebone.children[0].name]
ebone2.parent = None
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.armature_apply()
bpy.ops.pose.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='EDIT')
if type == 'anim':
for bone in arm.chainless_bones:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
ebone2 = armature.data.edit_bones[prefix + bone + '.isolated']
ebone2.parent = ebone
armature.location = arm.armature.location
armature.rotation_euler = arm.armature.rotation_euler
armature.scale = arm.armature.scale
#Final touches to the armature
armature.data.display_type = 'OCTAHEDRAL'
armature.show_in_front = True
if type == 'weight':
armature.data.show_bone_custom_shapes = False
elif type == 'anim':
armature.data.rigify_advanced_generation = True
armature.data.rigify_generate_mode = 'new'
armature.data.rigify_rig_basename = arm.armature.name + '.anim'
bpy.ops.object.mode_set(mode='OBJECT')
#Deletion
elif action == 1 or action == 2:
#Checks if they weren't deleted already
if type == 'weight':
try:
bpy.data.objects.remove(arm.weight_armature)
except:
print("Weight armature already deleted, cleaning rest")
try:
bpy.data.armatures.remove(arm.weight_armature_real)
except:
pass
vatinfo.weight_armature = False
arm.weight_armature = None
arm.weight_armature_real = None
elif type == 'anim':
if not vatinfo.animation_armature_setup:
try:
animation_data = bpy.data.objects[arm.animation_armature_real['target_object']].data
bpy.data.objects[arm.animation_armature_real['target_object']].data = bpy.data.meshes[arm.animation_armature_real['target_object_data']]
bpy.data.meshes.remove(animation_data)
except:
pass
try:
bpy.data.objects.remove(arm.animation_armature)
except:
print("Animation armature already deleted, cleaning rest")
bpy.data.armatures.remove(arm.animation_armature_real)
if action == 1 and vatinfo.animation_armature_setup:
try:
object = bpy.data.objects[arm.armature.name + '.anim']
bpy.data.objects.remove(object)
except:
pass
try:
armature = bpy.data.armatures[arm.armature_real.name + '.anim']
bpy.data.armatures.remove(armature)
except:
pass
elif action == 2:
arm.animation_armature = bpy.data.objects[arm.armature.name + '.anim']
arm.animation_armature_real = bpy.data.armatures[arm.armature_real.name + '.anim']
#Checks if retarget empties are present, if so, remove them
if action == 1:
armature = arm.armature
#Removes viewmodel camera if present
try:
camera = bpy.data.objects['viewmodel_camera']
camera_data = bpy.data.cameras['viewmodel_camera']
bpy.data.objects.remove(camera)
bpy.data.cameras.remove(camera_data)
except:
pass
#Removes original armature constraints
for cat in arm.symmetrical_bones.keys():
for bone in arm.symmetrical_bones[cat].values():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
try:
constraint = armature.pose.bones[prefix + bone].constraints["Retarget Location"]
armature.pose.bones[prefix + bone].constraints.remove(constraint)
except:
pass
try:
constraint | |
Find best matching ground truth box
sorted_ixs = np.argsort(overlaps[i])[::-1]
# print('\n', i, ' sorted overlaps:',overlaps[i, sorted_ixs])
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] == 1:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
# print('overlaps[',i,',',j,'] :', overlaps[i,j])
if iou < iou_threshold:
if verbose:
print(' i:', i, ' pred_box[i]:', pred_boxes[i], 'class[i]:', pred_class_ids[i],' gt_bx j',j, gt_boxes[j], 'class: ', gt_class_ids[j], ', iou:', round(iou,4), 'not meeting IoU threshold')
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
if verbose:
print(' i:', i, ' pred_box[i]:', pred_boxes[i], 'class[i]:', pred_class_ids[i],' gt_bx j:',j, gt_boxes[j], 'class: ', gt_class_ids[j], ', iou:', round(iou,4))
match_count += 1
gt_match[j] = 1
pred_match[i] = 1
break
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match).astype(np.float32) / len(gt_match)
if verbose:
print(' Cummulatvie sum precision/recalls')
print(' predictions: ', (np.arange(len(pred_match)) + 1))
print(' matches(TP): ', np.cumsum(pred_match))
print(' precisions: ', precisions)
print(' recalls: ', recalls)
print(' recalls= predictions /',len(gt_match))
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
if verbose:
print(' precisions: ', precisions)
print(' recalls: ', recalls)
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
if verbose:
print(' ', np.where(recalls[:-1] != recalls[1:])[0])
print(' indices: ', indices)
print(' recall diff: ', (recalls[indices] - recalls[indices - 1]))
print(' * PREC: ', (recalls[indices] - recalls[indices - 1])*precisions[indices])
print(' mAP: ', mAP)
return mAP, precisions, recalls, overlaps
def compute_recall(pred_boxes, gt_boxes, iou):
    '''
    Compute the recall at the given IoU threshold. It's an indication
    of how many GT boxes were found by the given prediction boxes.

    pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
    gt_boxes:   [N, (y1, x1, y2, x2)] in image coordinates
    iou:        IoU threshold a prediction must meet to count as a match

    Returns (recall, positive_ids) where positive_ids indexes the
    predictions whose best overlap reached the threshold.
    '''
    # Pairwise IoU matrix: rows = predictions, columns = GT boxes
    overlaps = compute_overlaps(pred_boxes, gt_boxes)
    # For each prediction, its best-matching GT box and that IoU value
    best_iou = overlaps.max(axis=1)
    best_gt = overlaps.argmax(axis=1)
    # Predictions whose best match clears the threshold
    positive_ids = np.where(best_iou >= iou)[0]
    # Distinct GT boxes covered by those predictions (set() dedups
    # multiple predictions hitting the same GT box)
    matched = best_gt[positive_ids]
    recall = len(set(matched)) / gt_boxes.shape[0]
    return recall, positive_ids
##------------------------------------------------------------------------------------------
## Apply non maximal suppression on a set of bounding boxes
##------------------------------------------------------------------------------------------
def non_max_suppression(boxes, scores, threshold):
    '''
    Perform non-maximum suppression and return indices of kept boxes.

    Input:
    ------
    boxes:     [N, (y1, x1, y2, x2)]. Note that (y2, x2) lies outside the box.
    scores:    1-D array of box scores.
    threshold: Float. IoU threshold to use for filtering.

    Returns an int32 array of indices into `boxes` for the kept boxes,
    ordered by descending score.
    '''
    assert boxes.shape[0] > 0
    if boxes.dtype.kind != "f":
        boxes = boxes.astype(np.float32)

    # Per-box areas, reused for every IoU computation below
    y1, x1 = boxes[:, 0], boxes[:, 1]
    y2, x2 = boxes[:, 2], boxes[:, 3]
    areas = (y2 - y1) * (x2 - x1)

    # Candidate indices, best score first
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        # Highest-scoring remaining box is always kept
        top = order[0]
        keep.append(top)
        # IoU of the kept box against all remaining candidates
        iou = compute_iou(boxes[top], boxes[order[1:]], areas[top], areas[order[1:]])
        # np.where indexes into order[1:], so shift by one to index order
        suppressed = np.where(iou > threshold)[0] + 1
        # Drop the suppressed boxes first (all indices > 0, so position 0
        # is unaffected), then drop the box we just kept
        order = np.delete(order, suppressed)
        order = np.delete(order, 0)
    return np.array(keep, dtype=np.int32)
############################################################################################
## Bounding box refinement - Compute Refinements/ Apply Refinements
############################################################################################
##------------------------------------------------------------------------------------------
## Bounding box refinement - apply delta bbox refinement - single bbox
##------------------------------------------------------------------------------------------
def apply_box_delta(box, delta):
    """
    Apply one refinement delta to one bounding box.

    box:   [y1, x1, y2, x2]; (y2, x2) lies outside the box.
    delta: [dy, dx, log(dh), log(dw)]

    Returns the refined box as a float array [y1, x1, y2, x2].
    """
    box = box.astype(np.float32)
    # Corner form -> center/size form
    h = box[2] - box[0]
    w = box[3] - box[1]
    cy = box[0] + 0.5 * h
    cx = box[1] + 0.5 * w
    # Shift the center proportionally to the box size, scale the size
    # by the exponentiated log-deltas
    cy = cy + delta[0] * h
    cx = cx + delta[1] * w
    h = h * np.exp(delta[2])
    w = w * np.exp(delta[3])
    # Center/size form -> corner form
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    return np.array([y1, x1, y1 + h, x1 + w])
##------------------------------------------------------------------------------------------
## Bounding box refinement - apply delta bbox refinement - numpy version
##------------------------------------------------------------------------------------------
def apply_box_deltas_np(boxes, deltas):
    """
    Apply refinement deltas to a batch of boxes (numpy, vectorized).

    boxes:  [N, (y1, x1, y2, x2)]; (y2, x2) lies outside the box.
    deltas: [N, (dy, dx, log(dh), log(dw))]

    Returns [N, (y1, x1, y2, x2)] as float32-derived values.
    """
    boxes = boxes.astype(np.float32)
    # Corner form -> center/size form
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift centers and rescale sizes per the deltas
    cy = cy + deltas[:, 0] * h
    cx = cx + deltas[:, 1] * w
    h = h * np.exp(deltas[:, 2])
    w = w * np.exp(deltas[:, 3])
    # Center/size form -> corner form
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    return np.stack([y1, x1, y1 + h, x1 + w], axis=1)
##------------------------------------------------------------------------------------------
## Bounding box refinement - apply delta bbox refinement - tensorflow version
##------------------------------------------------------------------------------------------
def apply_box_deltas_tf(boxes, deltas):
    """
    Apply refinement deltas to batched boxes (TensorFlow version).

    boxes:  [BS, N, (y1, x1, y2, x2)]; (y2, x2) lies outside the box.
    deltas: [BS, N, (dy, dx, log(dh), log(dw))]

    Returns a [BS, N, 4] tensor of refined boxes.
    """
    boxes = tf.cast(boxes, tf.float32)
    # Corner form -> center/size form
    h = boxes[:, :, 2] - boxes[:, :, 0]
    w = boxes[:, :, 3] - boxes[:, :, 1]
    cy = boxes[:, :, 0] + 0.5 * h
    cx = boxes[:, :, 1] + 0.5 * w
    # Shift centers and rescale sizes per the deltas
    cy = cy + deltas[:, :, 0] * h
    cx = cx + deltas[:, :, 1] * w
    h = h * tf.exp(deltas[:, :, 2])
    w = w * tf.exp(deltas[:, :, 3])
    # Center/size form -> corner form, stacked on the last axis
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    return tf.stack([y1, x1, y1 + h, x1 + w], axis=-1)
##------------------------------------------------------------------------------------------
## box_refinement - Compute Bbox delta refinement - single box
##------------------------------------------------------------------------------------------
def box_refinement(box, gt_box):
"""
Compute refinement needed to transform ONE bounding box to gt_box.
box : [y1, x1, y2, x2]
gt_box: [y1, x1, y2, x2]
(y2, x2) is assumed to be outside the box
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[2] - box[0]
width = box[3] - box[1]
center_y = box[0] + 0.5 * height
center_x = box[1] + 0.5 * width
gt_height = gt_box[2] - gt_box[0]
gt_width = gt_box[3] - gt_box[1]
gt_center_y = gt_box[0] + 0.5 * gt_height
gt_center_x = gt_box[1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
| |
List of strings or integers, each identifying a run
:param extra_subdirs: Unpacked list of strings naming subdirectories of
the level parent directory
:return: Dictionary mapping string keys to string paths
"""
lvl_paths = {'parent': os.path.join(lvl_dir, sub_base + '_' + feat_name)}
for run in runs:
lvl_paths[run] = os.path.join(lvl_paths['parent'],
'level1_run-{}'.format(run))
for subdr in extra_subdirs:
lvl_paths[subdr] = os.path.join(lvl_paths['parent'], subdr + '_files')
return lvl_paths
def get_main_pipeline_arg_names():
    """
    :return: Set containing strings naming all command-line arguments included
             by default in the main script, pipeline_wrapper.py
    """
    names = {'bids_dir', 'censor', 'events_dir', 'fd', 'filter', 'fsl_dir',
             'keep_all', 'levels', 'no_parallel', 'output', 'runs', 'ses',
             'spat_smooth', 'subject', 'surf_smooth', 'study_dir',
             'study_name', 'task', 'temp_dir', 'templates', 'template1',
             'template2', 'vol_smooth', 'wb_command'}
    # WRAPPER_LOC is a module-level constant naming the wrapper-location arg
    names.add(WRAPPER_LOC)
    return names
def get_optional_cli_args(cli_args, drop_slurm=False):
    """
    :param cli_args: Dictionary with all validated command-line arguments,
                     all of which are used by this function
    :param drop_slurm: True to exclude SLURM arguments; else False
    :return: List of most cli_args optional arguments and their values
    """
    optional_args = []
    for arg, value in cli_args.items():
        # Skip falsy values, and SLURM args when the caller asked to drop them
        if not value or (drop_slurm and arg in SLURM_ARGS):
            continue
        optional_args.append(as_cli_arg(arg))
        if isinstance(value, list):
            # Multi-value arguments: one string per element
            optional_args.extend(str(el) for el in value)
        elif not isinstance(value, bool):
            # Booleans are flags with no value; everything else is stringified
            optional_args.append(str(value))
    return optional_args
def get_pipeline_cli_argparser(arg_names=get_main_pipeline_arg_names()):
    """
    Build the argparse.ArgumentParser for pipeline_wrapper.py, adding only
    the arguments whose names appear in arg_names (so other scripts can
    reuse this parser with a subset of arguments).

    NOTE(review): the default for arg_names is evaluated once at definition
    time, so every default call shares the same set object. Harmless as long
    as no caller mutates it — confirm.

    :param arg_names: Set containing strings naming all command-line arguments
    :return: argparse.ArgumentParser with all command-line arguments
             needed to run pipeline_wrapper.py
    """
    # Default values for user input arguments
    default_BIDS_dir = 'abcd-hcp-pipeline'
    default_censor_num = 0  # 2
    default_fd = 0.9
    default_smooth = 0
    default_study_name = 'ABCD'
    default_runs_lvls = [1, 2]
    default_temporal_filter = 100
    # Guessed from the user's environment (e.g. BASH aliases)
    default_wb_command = get_default_ext_command('wb_command')
    # Template path shown in help text; '(--x)' marks where each CLI arg lands
    generic_dtseries_path = os.path.join(
        '(--study-dir)', 'derivatives', '(--bids-dir)',
        '(--subject)', '(--ses)', 'func',
        'sub-(--subject)_ses-(--ses)_task-(--task)_'
        'run-(--runs)_bold_timeseries.dtseries.nii'
    )
    generic_output_dirpath = os.path.join('(--study-dir)', 'derivatives',
                                          'abcd-bids-tfmri-pipeline',
                                          '(--subject)', '(--ses)')
    # Strings used in multiple help messages
    msg_default = ' By default, this argument\'s value(s) will be {}.'
    msg_pipeline = 'Name of the {} that you are running the pipeline on.'
    msg_smooth = ('Millimeters of {} smoothing that has already been applied '
                  'in the minimal processing steps.')
    msg_template = 'Name (not full path) of the Level {} .fsf template file.'
    msg_whole_num = ' This argument must be a positive integer.'
    # Create parser with command-line arguments from user
    parser = argparse.ArgumentParser(description=(
        'ABCD fMRI Task Prep pipeline. Inputs must be in the same format '
        'as ABCD-HCP-Pipeline outputs after running filemap.'
    ))
    parser = add_arg_if_in_arg_names('bids_dir', arg_names, parser,
        metavar='NAME_OF_BIDS_DERIVATIVES_PIPELINE_DIRECTORY',
        default=default_BIDS_dir,
        help=('Name of the BIDS-standard file-mapped directory with subject '
              'data in the "derivatives" subdirectory of your --study-dir. '
              'This path should be valid: ' + generic_dtseries_path +
              msg_default.format(default_BIDS_dir))
    )
    # Specify how many initial frames/volumes to censor
    parser = add_arg_if_in_arg_names('censor', arg_names, parser,
        metavar='INITIAL_NUMER_OF_TIMEPOINTS_TO_CENSOR',
        default=default_censor_num, type=valid_whole_number,
        help=('The number of initial frames/volumes to censor.'
              + msg_whole_num + msg_default.format(default_censor_num))
    )
    parser = add_arg_if_in_arg_names('events_dir', arg_names, parser,
        metavar='EVENT_FILES_DIRECTORY',
        type=valid_readable_dir,
        help='Valid path to a real directory containing event .tsv files.'
    )
    # Specify framewise displacement threshold to censor volumes with high motion
    parser = add_arg_if_in_arg_names('fd', arg_names, parser,
        metavar='FRAMEWISE_DISPLACEMENT_THRESHOLD',
        default=default_fd, type=valid_float_0_to_1,
        help=('The framewise displace threshold for censoring volumes with '
              'high motion. This must be a decimal between 0 and 1.{}'
              .format(msg_default.format(default_fd)))
    )
    # High pass temporal filter cutoff number value
    parser = add_arg_if_in_arg_names('filter', arg_names, parser,
        metavar='HIGH_PASS_TEMPORAL_FILTER_CUTOFF',
        default=default_temporal_filter, type=valid_whole_number,
        help=('High pass filter cutoff (in seconds).{}{}'.format(
              msg_whole_num, msg_default.format(default_temporal_filter)
        ))
    )
    parser = add_arg_if_in_arg_names('fsl_dir', arg_names, parser,
        '-fsl', '--fsl', dest='fsl_dir', type=valid_readable_dir,
        help=('Valid path to an existing directory containing the executable '
              'files fsl, fslmerge, fslmaths, flameo, and feat_model from '
              'the FMRIB Software Library (FSL).')
    )
    parser = add_arg_if_in_arg_names('keep_all', arg_names, parser,
        action='store_true',
        help=('Include this flag to keep all files generated during the '
              'pipeline. By default, the pipeline will only keep dtseries, '
              'dof, log, and event files.')
    )
    # Which analysis levels to run
    parser = add_arg_if_in_arg_names('levels', arg_names, parser,
        metavar='ANALYSIS_LEVELS_TO_RUN',
        nargs='*', choices=default_runs_lvls, type=valid_whole_number,
        help=('Levels to conduct the analysis on: {0} for one run, and/or '
              '{1} to merge multiple runs.'.format(*default_runs_lvls))
    )
    parser = add_arg_if_in_arg_names('no_parallel', arg_names, parser,
        action='store_true',
        help=('Include this flag to process level 1 analysis runs '
              'sequentially. By default, the script will process the analyses '
              'in parallel simultaneously.')
    )
    parser = add_arg_if_in_arg_names('output', arg_names, parser,
        '-out', metavar='OUTPUT_DIRECTORY', type=valid_output_dir,  # required=True,
        help=('Directory path to save pipeline outputs into.'
              + msg_default.format(generic_output_dirpath))
    )
    # Specify the number of runs each subject has
    parser = add_arg_if_in_arg_names('runs', arg_names, parser,
        metavar='RUN',
        default=default_runs_lvls, type=valid_whole_number, nargs="+",
        help=('Each subject\'s number of runs. This argument must be 1 or '
              'more positive integers provided as a space-delimited list. '
              'For example: 1 2 3 4. By default, this argument\'s value(s) '
              'will be 1 2.')
    )
    # SCAN_ARG is a module-level constant naming the scanner-info argument
    parser = add_arg_if_in_arg_names(SCAN_ARG, arg_names, parser,
        type=valid_readable_file,
        help=('Path to existing .csv file listing all scanners\' parameters. '
              + msg_default.format('scan_info/{}.csv in the code directory.'
                                   .format(SCAN_ARG)))
    )
    # Which session to run the pipeline on
    parser = add_arg_if_in_arg_names('ses', arg_names, parser,
        metavar='SESSION', required=True,  # default=default_ses,
        type=lambda x: valid_subj_ses(x, 'ses-', 'session'),  # , 'ses'),
        help=msg_pipeline.format('session')
    )
    # Desired spatial smoothing number
    parser = add_arg_if_in_arg_names('spat_smooth', arg_names, parser,
        metavar='DESIRED_SPATIAL_SMOOTHING',
        default=default_smooth, type=valid_whole_number,
        help=('Millimeters of spatial smoothing that you want for the surface '
              'and volume data.'
              + msg_whole_num + msg_default.format(default_smooth))
    )
    parser = add_arg_if_in_arg_names('subject', arg_names, parser,
        metavar='SUBJECT_ID', required=True,
        type=lambda x: valid_subj_ses(x, 'sub-', 'subject'),  # , 'NDAR', 'INV'),
        help='ID of subject to process.'
    )
    # Surface smoothing number
    parser = add_arg_if_in_arg_names('surf_smooth', arg_names, parser,
        metavar='CURRENT_SURFACE_SMOOTHING',
        default=default_smooth, type=valid_whole_number,
        help=''.join((msg_smooth.format('surface'), msg_whole_num,
                      msg_default.format(default_smooth)))
    )
    # Set file path for base directory and BIDS directory
    parser = add_arg_if_in_arg_names('study_dir', arg_names, parser,
        metavar='BIDS_BASE_STUDY_DIRECTORY',
        type=valid_readable_dir, required=True,
        help='Valid path to existing base study directory.'
    )
    parser = add_arg_if_in_arg_names('study_name', arg_names, parser,
        metavar='STUDY_NAME', default=default_study_name,
        help=msg_pipeline.format('study')
    )
    # Which task you are running the pipeline on
    parser = add_arg_if_in_arg_names('task', arg_names, parser,
        metavar='TASK_NAME', required=True,
        help=msg_pipeline.format('task')  # + msg_choices(choices_tasks)
    )
    parser = add_arg_if_in_arg_names('temp_dir', arg_names, parser,
        type=valid_readable_dir, metavar='TEMPORARY_DIRECTORY',
        help=('Valid path to existing directory to save temporary files into.')
    )
    parser = add_arg_if_in_arg_names('templates', arg_names, parser,
        type=valid_readable_dir,
        help='Valid path to existing directory with template .fsf files.'
    )
    for lvl in default_runs_lvls:  # Specify the .fsf template files' names
        parser = add_arg_if_in_arg_names(
            'template{}'.format(lvl), arg_names, parser,
            metavar='LEVEL_{}_TEMPLATE_NAME'.format(lvl),
            type=valid_template_filename, help=msg_template.format(lvl)
        )
    # Volume smoothing number
    parser = add_arg_if_in_arg_names('vol_smooth', arg_names, parser,
        metavar='CURRENT_VOLUME_SMOOTHING',
        default=default_smooth, type=valid_whole_number,
        help=''.join((msg_smooth.format('volume'), msg_whole_num,
                      msg_default.format(default_smooth)))
    )
    # Specify path to wb_command
    parser = add_arg_if_in_arg_names('wb_command', arg_names, parser,
        default=default_wb_command, type=valid_readable_file,
        help=('Path to wb_command file to run Workbench Command. If this flag '
              'is excluded, then the script will try to guess the path to '
              'the wb_command file by checking the user\'s BASH aliases. '
              'Your default wb_command is "{}". If '
              'that says "None", then you need to include this argument.'
              .format(default_wb_command))
    )
    # Argument used to get this script's dir
    parser = add_arg_if_in_arg_names(WRAPPER_LOC, arg_names, parser,
        type=valid_readable_dir, required=True,
        help=('Valid path to existing ABCD-BIDS-task-fmri-pipeline directory '
              'that contains pipeline_wrapper.py')
    )
    return parser
def get_region_path_vars(cli_args, paths, run):
    """
    Build and return paths to particular brain region images' files/dirs
    by filling in the unique parts of generic path strings.

    :param cli_args: Dictionary containing all command-line arguments from user
    :param paths: Dictionary of path strings, and of dictionaries of path
                  strings, used throughout processing in both levels
    :param run: Whole number (as an int or a string) defining which run this is
    :return: Tuple of string generic paths: design, func_str, subcort, surf_str
    """
    lvl_1 = paths['lvl_1']
    # Design file base and subcortical volume stats directory
    design = os.path.join(lvl_1['fsf'], get_sub_base(cli_args, run) + '_level1')
    subcort = os.path.join(lvl_1['parent'], 'SubcorticalVolumeStats')
    # Generic format strings; callers fill in the '{}' placeholders later
    func_str = os.path.join(
        lvl_1['intermediate'],
        '{}{}_filtered.atlasroi{}.{}.32k_fs_LR.func.gii'
    )
    surf_str = os.path.join(
        paths['sub_ses']['anat'],
        '{}_hemi-{}_space-MNI_mesh-fsLR32k_midthickness.surf.gii'
    )
    return design, func_str, subcort, surf_str
def get_replacements(cli_args, **kwargs):
    """
    :param cli_args: Dictionary containing all command-line arguments from user
    :return: Dictionary mapping variables' generic names in template files to
             those variables' actual values provided by the user
    """
    # Template placeholder -> user-provided value; numeric values are
    # stringified because they are substituted into template text
    replacements = dict(
        SUBID=cli_args['subject'],
        FEAT_NAME=cli_args['study_name'],  # Not paths['feat_name']
        FIN_SMOOTH=str(cli_args['spat_smooth']),
        HP_FILTER=str(cli_args['filter']),
        SESSION=cli_args['ses'],
        TASK=cli_args['task'],
        OUTPUT_DIR=cli_args['output'],
        EVENTS_DIR=cli_args['events_dir'],
        STUDY_DIR=cli_args['study_dir'],
    )
    # Caller-supplied extras override/extend the defaults
    replacements.update(kwargs)
    return replacements
def get_sbatch_args(cli_args, job):
"""
:param cli_args: Dictionary containing all command-line arguments from user
:param job: | |
# <gh_stars>1-10
import pymongo
import math
import datetime
import requests
import json
import numpy as np
from astropy.time import Time
import matplotlib.pyplot as plt
import os
from bs4 import BeautifulSoup
import re
from typing import Union
import pyprind
from zwickyverse import Private
import glob
date_type = Union[datetime.datetime, float]
''' load secrets '''
with open('./secrets.json') as sjson:
secrets = json.load(sjson)
def jd_to_date(jd):
    """
    Convert Julian Day to a (year, month, day) calendar date.

    Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
    4th ed., Duffet-Smith and Zwart, 2011.

    Parameters
    ----------
    jd : float
        Julian Day

    Returns
    -------
    year : int
        Year as integer. Years preceding 1 A.D. should be 0 or negative.
        The year before 1 A.D. is 0, 10 B.C. is year -9.
    month : int
        Month as integer, Jan = 1, Feb. = 2, etc.
    day : float
        Day, may contain fractional part.

    Examples
    --------
    >>> jd_to_date(2446113.75)
    (1985, 2, 17.25)
    """
    # Shift so the day boundary falls at midnight instead of noon
    jd = jd + 0.5
    frac, whole = math.modf(jd)
    whole = int(whole)
    # Gregorian correction applies only after the 1582 calendar reform
    alpha = math.trunc((whole - 1867216.25) / 36524.25)
    b = whole + 1 + alpha - math.trunc(alpha / 4.) if whole > 2299160 else whole
    c = b + 1524
    d = math.trunc((c - 122.1) / 365.25)
    e = math.trunc(365.25 * d)
    g = math.trunc((c - e) / 30.6001)
    # Fractional day survives in `frac`
    day = c - e + frac - math.trunc(30.6001 * g)
    month = g - 1 if g < 13.5 else g - 13
    year = d - 4716 if month > 2.5 else d - 4715
    return year, month, day
def jd2date(jd):
    """Convert a Julian Day to a datetime at midnight of that calendar day
    (the fractional part of the day is truncated)."""
    year, month, day = jd_to_date(jd)
    return datetime.datetime(year, month, int(math.floor(day)))
def fetch_cutout(_id: str, date: date_type, _path_out: str = './', _v: bool = False) -> bool:
    """
    Download the science/reference cutout JPEG for one streak id and save it
    into _path_out as '{_id}_scimref.jpg'.

    :param _id: streak identifier used in the cutout file name
    :param date: UTC date of the observation, as a datetime/date or a Julian Day float
    :param _path_out: directory to save the image into
    :param _v: verbose flag
    :return: True if the file was downloaded and saved; False otherwise
             (best-effort: network errors are printed, not raised)
    """
    _base_url = f"{secrets['deep_asteroids_service']['protocol']}://" + \
                f"{secrets['deep_asteroids_service']['host']}:{secrets['deep_asteroids_service']['port']}"
    _base_url = os.path.join(_base_url, 'data/stamps')
    if _v:
        print(type(date))
    if isinstance(date, (datetime.datetime, datetime.date)):
        date_utc = date.strftime('%Y%m%d')
    elif isinstance(date, float):
        # Interpret a float as a Julian Day
        date_utc = jd2date(date).strftime('%Y%m%d')
    else:
        # BUG FIX: previously an unsupported date type left date_utc unbound,
        # producing an UnboundLocalError that was swallowed by the except
        # below. Fail fast with a clear message instead.
        print(f'fetch_cutout: unsupported date type {type(date)}')
        return False
    try:
        url = os.path.join(_base_url, f'stamps_{date_utc}/{_id}_scimref.jpg')
        if _v:
            print(url)
        filename = os.path.join(_path_out, f'{_id}_scimref.jpg')
        r = requests.get(url, timeout=10)
        if r.status_code == 200:
            with open(filename, 'wb') as f:
                f.write(r.content)
            return True
    except Exception as e:
        # Best-effort download: report and fall through to return False
        print(str(e))
    return False
def fetch_real_streakids(date_start=datetime.datetime(2018, 5, 31),
                         date_end=None,
                         _path_out='./', _v: bool = True):
    """
    Scrape the yupana service for streak ids flagged as real, one page per
    UTC day in [date_start, date_end], and dump them to a JSON file mapping
    'YYYYMMDD' -> list of streak ids.

    BUG FIX: date_end previously defaulted to datetime.datetime.utcnow(),
    which is evaluated once at import time, so long-running processes silently
    used a stale end date. A None sentinel now means "now at call time".

    :param date_start: first day to scrape (inclusive)
    :param date_end: last day to scrape (inclusive); None means utcnow()
    :param _path_out: directory to write the JSON summary into
    :param _v: verbose flag
    :return: dict with 'status' ('success'/'failed') and, on success,
             'path_json' pointing at the written file
    """
    try:
        if date_end is None:
            date_end = datetime.datetime.utcnow()
        session = requests.Session()
        session.auth = (secrets['yupana']['user'], secrets['yupana']['pwd'])
        reals = dict()
        for dd in range((date_end - date_start).days + 1):
            date = (date_start + datetime.timedelta(days=dd)).strftime('%Y%m%d')
            try:
                url = secrets['yupana']['url']
                result = session.get(url, params={'date': date})
                if result.status_code == 200:
                    soup = BeautifulSoup(result.content, 'html.parser')
                    # Streak ids appear as '<id>_scimref' cutout file names
                    cutouts = re.findall(r'(strkid.*)_scimref', str(soup))
                    if _v:
                        print(date)
                        print(cutouts)
                    if len(cutouts) > 0:
                        reals[date] = cutouts
            except Exception as e:
                # Best effort per day: report and keep scraping the rest
                print(str(e))
        json_filename = f'reals_{date_start.strftime("%Y%m%d")}_{date_end.strftime("%Y%m%d")}.json'
        with open(os.path.join(_path_out, json_filename), 'w') as outfile:
            json.dump(reals, outfile, sort_keys=True, indent=2)
        real_ids = []
        for date in reals:
            real_ids += reals[date]
        if _v:
            print('\n', real_ids)
        return {'status': 'success', 'path_json': os.path.join(_path_out, json_filename)}
    except Exception as e:
        print(str(e))
        return {'status': 'failed'}
def fetch_reals(path_json, path_out='./', _v: bool = True):
    """
    Download cutout images for every streak id listed in a JSON file produced
    by fetch_real_streakids ('YYYYMMDD' -> list of streak ids). Images are
    saved under path_out in a 'reals_<first>_<last>' subdirectory.

    :param path_json: path to the JSON file of real streak ids
    :param path_out: parent directory for the output subdirectory
    :param _v: verbose flag (shows a progress bar)
    """
    with open(path_json, 'r') as f:
        data = json.load(f)
    dates = sorted(data.keys())
    # Output dir is named after the first and last dates present
    _path_out = os.path.join(path_out, f'reals_{dates[0]}_{dates[-1]}')
    if not os.path.exists(_path_out):
        os.makedirs(_path_out)
    if _v:
        bar = pyprind.ProgBar(len(data), stream=1, title='Fetching real streaks...', monitor=True)
    for date in data:
        if not data[date]:
            continue
        if _v:
            bar.update(iterations=1, item_id=date)
        for streak_id in data[date]:
            try:
                fetch_cutout(streak_id, datetime.datetime.strptime(date, '%Y%m%d'), _path_out, _v=False)
            except Exception as e:
                # Best effort per cutout: report and move on
                print(str(e))
                continue
def sample(date_start=datetime.datetime(2018, 5, 31),
date_end=datetime.datetime.utcnow(),
n_samples: int=1000,
path_out='./', _v: bool=True):
try:
jd_start = Time(date_start, format='datetime', scale='utc').jd
jd_end = Time(date_end, format='datetime', scale='utc').jd
if _v:
print(jd_start, jd_end)
client = pymongo.MongoClient(host=secrets['deep_asteroids_mongodb']['host'],
port=secrets['deep_asteroids_mongodb']['port'])
db = client['deep-asteroids']
db.authenticate(name=secrets['deep_asteroids_mongodb']['user'],
password=secrets['deep_asteroids_mongodb']['pwd'])
''' training sets for the rb classifiers (real/bogus) '''
rb_classifiers = ('rb_vgg6', 'rb_resnet50', 'rb_densenet121')
# rb > 0.8: n_samples cutouts
# high score by either of the classifiers in the family
high_rb_score = [{rb_classifier: {'$gt': 0.8}} for rb_classifier in rb_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': high_rb_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'rb_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '__rb_gt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for rb___rb_gt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
# rb < 0.8: n_samples cutouts
# low score by either of the classifiers in the family
low_rb_score = [{classifier: {'$lt': 0.8}} for classifier in rb_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': low_rb_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'rb_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '__rb_lt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for rb___rb_lt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
''' training sets for the sl classifier (short/long) '''
sl_classifiers = ('sl_vgg6', 'sl_resnet50', 'sl_densenet121')
# rb > 0.9, sl > 0.8: n_samples cutouts
high_rb_score = [{rb_classifier: {'$gt': 0.9}} for rb_classifier in rb_classifiers]
high_sl_score = [{sl_classifier: {'$gt': 0.8}} for sl_classifier in sl_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': high_rb_score},
{'$or': high_sl_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'sl_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') +
'__rb_gt_0.9__sl_gt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for sl__rb_gt_0.9__sl_gt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
# rb > 0.9, sl < 0.8: n_samples cutouts
low_sl_score = [{sl_classifier: {'$lt': 0.8}} for sl_classifier in sl_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': high_rb_score},
{'$or': low_sl_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'sl_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') +
'__rb_gt_0.9__sl_lt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for sl__rb_gt_0.9__sl_lt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
''' training sets for the kd classifier (keep/ditch) '''
kd_classifiers = ('kd_vgg6', 'kd_resnet50')
# rb > 0.9, sl > 0.9, kd > 0.8: n_samples cutouts
high_rb_score = [{rb_classifier: {'$gt': 0.9}} for rb_classifier in rb_classifiers]
high_sl_score = [{sl_classifier: {'$gt': 0.9}} for sl_classifier in sl_classifiers]
high_kd_score = [{kd_classifier: {'$gt': 0.8}} for kd_classifier in kd_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': high_rb_score},
{'$or': high_sl_score},
{'$or': high_kd_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'kd_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') +
'__rb_gt_0.9__sl_gt_0.9__kd_gt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for kd__rb_gt_0.9__sl_gt_0.9__kd_gt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
# rb > 0.9, sl > 0.9, sl < 0.8: n_samples cutouts
low_kd_score = [{kd_classifier: {'$lt': 0.8}} for kd_classifier in kd_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': high_rb_score},
{'$or': high_sl_score},
{'$or': low_kd_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'kd_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') +
'__rb_gt_0.9__sl_gt_0.9__kd_lt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for kd__rb_gt_0.9__sl_gt_0.9__kd_lt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
''' training sets for the os classifiers (one-shot real/bogus) '''
os_classifiers = ('os_vgg6', 'os_resnet50', 'os_densenet121')
# os > 0.8: n_samples cutouts
# high score by either of the classifiers in the family
high_os_score = [{os_classifier: {'$gt': 0.8}} for os_classifier in os_classifiers]
cursor = db['deep-asteroids'].aggregate([
{'$match': {'$and': [{'$or': high_os_score},
{'jd': {'$gt': jd_start, '$lt': jd_end}}
]}},
{'$project': {'_id': 1, 'jd': 1}},
{'$sample': {'size': n_samples}}
], allowDiskUse=True)
streaks = list(cursor)
path = os.path.join(path_out, 'os_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '__os_gt_0.8')
os.makedirs(path)
num_streaks = len(streaks)
if _v:
bar = pyprind.ProgBar(num_streaks, stream=1,
title='Fetching streaks for os___os_gt_0.8', monitor=True)
for si, streak in enumerate(streaks):
# print(f'fetching {streak["_id"]}: {si+1}/{num_streaks}')
fetch_cutout(streak['_id'], streak['jd'], path)
if _v:
bar.update(iterations=1)
# os < 0.8: n_samples cutouts
# low score by either of the classifiers in the family
low_os_score = [{classifier: {'$lt': 0.8}} for classifier in os_classifiers]
cursor = db['deep-asteroids'].aggregate([
| |
# Keras | neural networks
# Feed these vectors into a neural network classifier
# basic layer architecture (1): one 200-dim word2vec input -> 32 ReLU units
# -> single sigmoid output for binary sentiment
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=200))  # 'sigmoid'
model.add(Dense(1, activation='sigmoid'))  # 'softmax'
model.compile(optimizer='rmsprop',  # 'adam', 'sgd'
              loss='binary_crossentropy',
              metrics=['accuracy'])
# NOTE(review): early_stoping (sic) is created but never passed to model.fit
# (callbacks=None below), so early stopping is not actually applied — confirm intent.
early_stoping = EarlyStopping(patience=3, monitor = "val_loss")
# Train the model, iterating on the data in batches of 32 samples
model.fit(train_vecs_w2v, yw_train, epochs=20, batch_size=32, verbose=1,
          callbacks=None, validation_split=0.0, validation_data=None)
print(model.summary())
# Tweet sentiment classifier using word2vec and Keras - combination of these two tools resulted in a ~82% classification model accuracy
# NOTE(review): evaluate is called twice on the test set (here and inside the
# print below); the first call's result is discarded — redundant work.
model.evaluate(test_vecs_w2v, yw_test, verbose=2)
print("Accuracy: %.2f%%" % (model.evaluate(test_vecs_w2v, yw_test, verbose=2)[1]*100))
model.predict(test_vecs_w2v, batch_size=32, verbose=0)
# basic layer architecture (2) - worser results
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM
model = Sequential()
model.add(Dense(32, activation='sigmoid', input_dim=200))
# NOTE(review): softmax over a single output unit always emits 1.0, so this
# model cannot learn a binary decision — likely the cause of the "worser
# results" noted above; a one-unit binary output conventionally uses 'sigmoid'.
model.add(Dense(1, activation='softmax'))
model.compile(optimizer='sgd',  # 'adam', 'sgd', 'rmsprop'
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Train the model, iterating on the data in batches of 32 samples
model.fit(train_vecs_w2v, yw_train, epochs=10, batch_size=32, verbose=1,
          callbacks=None, validation_split=0.0, validation_data=None)
print(model.summary())
model.evaluate(test_vecs_w2v, yw_test, verbose=2)
print("Accuracy: %.2f%%" % (model.evaluate(test_vecs_w2v, yw_test, verbose=2)[1]*100))
# Training models/predicting tweet's positive or negative sentiment
# Trying to choose the best algorithm
# Making another copy of tweets data frame
tweets3 = tweets.copy(deep=True)
tweets3.head(2)
tweets3.shape
# Binary classification of a tweet
# NOTE(review): neutral tweets (0.0) are collapsed into the negative class (0),
# turning the three-way labels into a binary target -- confirm this is intended.
tweets3['sentiment'] = tweets3['sentiment'].replace({-1.0: 0, 0.0: 0, 1.0: 1})
tweets3['sentiment'][:10]
# Cross validation - to select the model
# Testing several algorithms using its default parameters
# Applying 10-fold cross validation on the training set to select the best method
# Later on, using the grid search approach for parameters tuning
# Building and evaluating models for each combination of algorithm parameters
# Creating list for best algorithms using its default parameters
# Accumulator of [method, mean, std, formatted accuracy] rows, filled by ModelEvaluation.
ScoreSummaryByModel = list()
# Function for model evaluation - based on 'cleaned', but not tokenized tweets
def ModelEvaluation(model, comment):
    """10-fold cross-validate `model` inside a bag-of-words/tf-idf pipeline.

    Appends [comment, mean, std, "mean (+/- 2*std)"] to the global
    ScoreSummaryByModel list and prints the accuracy together with its
    95% confidence interval.
    """
    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('model', model)])
    scores = cross_val_score(pipeline, tweets3['cleaning'], tweets3['sentiment'],
                             cv=10, scoring='accuracy')
    mean, std = scores.mean(), scores.std()
    # The mean score and the 95% confidence interval of the score estimate.
    ScoreSummaryByModel.append(
        [comment, mean, std, "%0.3f (+/- %0.3f)" % (mean, std * 2)])
    print("Accuracy: %0.3f (+/- %0.3f)" % (mean, std * 2))
# Cross-validate each candidate classifier with its default parameters.
ModelEvaluation (MultinomialNB(),'Naive Bayes classifier')
ModelEvaluation (BernoulliNB(binarize=0.0),'Bernoulli Naive Bayes')
ModelEvaluation (SVC(kernel='linear'),'SVC, linear kernel')
ModelEvaluation (LinearSVC(),'LinearSVC')
ModelEvaluation (SGDClassifier(),'SGD')
# Below is the summary. LinearSVC and SVC (linear kernel) with default parameters returned the highest accuracy
# Rank the candidates by mean cross-validated accuracy, best first.
df_ScoreSummaryByModel=DataFrame(ScoreSummaryByModel,columns=['Method','Mean','Std','Accuracy'])
df_ScoreSummaryByModel.sort_values(['Mean'],ascending=False,inplace=True)
df_ScoreSummaryByModel
# GridSearchCV - parameters tuning for classifiers
# Using the grid search approach for parameters tuning
# Building and evaluating models for each combination of algorithm parameters
# Accumulator of [method, best score, best-parameter string] rows.
ScoreSummaryByModelParams = list()
# Function for optimizing parameters - based on 'cleaned', but not tokenized tweets
def ModelParamsEvaluation(vectorizer, model, params, comment):
    """Grid-search `params` over a vectorizer/tf-idf/classifier pipeline.

    Prints the best score and the winning parameter set, and appends
    [comment, best_score, all tuned parameters as a string] to the global
    ScoreSummaryByModelParams list.
    """
    pipeline = Pipeline([
        ('vect', vectorizer),
        ('tfidf', TfidfTransformer()),
        ('clf', model),
    ])
    # Finding the best parameters
    grid_search = GridSearchCV(estimator=pipeline, param_grid=params,
                               verbose=1, scoring='accuracy')
    grid_search.fit(tweets3['cleaning'], tweets3['sentiment'])
    print("Best score: {0}".format(grid_search.best_score_))
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    # BUG FIX: the summary previously used the loop variable *after* the loop,
    # so only the last tuned parameter was recorded; collect all of them.
    param_lines = []
    for param_name in sorted(params.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
        param_lines.append("\t%s: %r" % (param_name, best_parameters[param_name]))
    ScoreSummaryByModelParams.append(
        [comment, grid_search.best_score_, "\n".join(param_lines)])
# Bernoulli Naive Bayes
# Character n-gram grid (2- and 3-grams) with a range of smoothing strengths.
p = {'vect__analyzer':('char', 'char_wb'),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((2, 2), (3, 3)),
'clf__alpha': (1,0.1,0.01,0.001,0.0001,0)}
ModelParamsEvaluation(CountVectorizer(),BernoulliNB(),p,'Bernoulli Naive Bayes')
# 3 chars is almost a word. The score is lower then for word analyzer
# Bernoulli Naive Bayes, analyzer=word
p = {
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((1, 1), (3, 3), (5,5),(2,5)),
'clf__alpha': (1,0.1,0.01,0.001,0.0001,0)}
ModelParamsEvaluation(CountVectorizer(analyzer='word'),BernoulliNB(),p,'Bernoulli Naive Bayes, analyzer=word')
# Tweets are short messages, therefore unigrams make sense. Using unigrams seems to be the best approach
# LinearSVC
# Same character-level grid, tuning the SVM regularization strength C.
p = {'vect__analyzer':('char', 'char_wb'),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((2, 2), (3, 3)),
'clf__C': (1,0.1,0.01,0.001,0.0001)
}
ModelParamsEvaluation(CountVectorizer(),LinearSVC(),p,'LinearSVC')
# LinearSVC, analyzer=word
p = {
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((1, 1), (3, 3), (5,5),(2,5)),
'clf__C': (1,0.1,0.01,0.001,0.0001)
}
ModelParamsEvaluation(CountVectorizer(analyzer='word'),LinearSVC(),p,'LinearSVC analyzer=word')
# SVC, linear kernel
p = {'vect__analyzer':('char', 'char_wb'),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((2, 2), (3, 3)),
'clf__C': (1,0.1,0.01,0.001,0.0001)}
ModelParamsEvaluation (CountVectorizer(),SVC(kernel='linear'),p,'SVC, linear kernel, char')
# SVC, linear kernel, analyzer=word
p = {
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((1, 1), (3, 3), (5,5),(2,5)),
'clf__C': (1,0.1,0.01,0.001,0.0001)}
ModelParamsEvaluation (CountVectorizer(analyzer='word'),SVC(kernel='linear'),p,'SVC, linear kernel, analyzer=word')
# SGDClassifier
# SGD additionally tunes the penalty (regularizer) alongside alpha.
p = {'vect__analyzer':('char', 'char_wb'),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((2, 2), (3, 3)),
'clf__alpha': (0.01,0.001,0.0001,0.00001, 0.000001),
'clf__penalty': ('l1','l2', 'elasticnet')}
ModelParamsEvaluation (CountVectorizer(),SGDClassifier(),p,'SGD Classifier, analyzer=char')
# SGDClassifier, analyzer='word'
p = {
'vect__max_df': (0.5, 0.75, 1.0),
'vect__ngram_range': ((1, 1), (3, 3), (5,5),(2,5)),
'clf__alpha': (0.01,0.001,0.0001,0.00001, 0.000001),
'clf__penalty': ('l1','l2', 'elasticnet')}
ModelParamsEvaluation (CountVectorizer(analyzer='word'),SGDClassifier(),p,'SGD Classifier, analyzer=word')
# Below is the summary:
# The highest 97% accuracy returned:
# - SGDClassifier (alpha: 1e-05, char 1-ngram)
# - LinearSVC (C=1, analyzer=word, char 1-ngram)
# - SVC with linear kernel (C=1, analyzer=word, char 1-ngram)
# Long running time of models
# Rank all grid-search runs by their best cross-validated score.
df_ScoreSummaryByModelParams=DataFrame(ScoreSummaryByModelParams,columns=['Method','BestScore','BestParameter'])
df_ScoreSummaryByModelParams.sort_values(['BestScore'],ascending=False,inplace=True)
df_ScoreSummaryByModelParams
# Let's apply the discovered best approach to test data set
# Using SGDClassifier as best model
# Apply some score metrics
# 75/25 train/test split of the cleaned tweets and binary labels.
tweet_train, tweet_test, sentiment_train, sentiment_test = train_test_split(tweets3['cleaning'], tweets3['sentiment'], test_size=0.25, random_state=42)
print(tweet_train.shape, tweet_test.shape, sentiment_train.shape, sentiment_test.shape)
#The best result for SGD classifier, 1-1 n-grams
# Pipeline frozen at the parameters the grid search selected above.
sgd_pipeline = Pipeline([
('bow', CountVectorizer(analyzer='word',ngram_range=(1, 1), max_df=0.5)),
('tfidf', TfidfTransformer()),
('classifier', SGDClassifier(alpha=1e-05, penalty="l1"))])
sgd_pipeline.fit(tweet_train, sentiment_train)
predictions = sgd_pipeline.predict(tweet_test)
# Function for metrics
def PredictionEvaluation(sentiment_test, sentiment_predictions):
    """Print precision, accuracy, recall, F1, the confusion matrix and
    ROC-AUC for binary predictions against the true labels."""
    scalar_metrics = (('Precision', precision_score),
                      ('Accuracy', accuracy_score),
                      ('Recall', recall_score),
                      ('F1', f1_score))
    for label, metric in scalar_metrics:
        print('%s: %0.3f' % (label, metric(sentiment_test, sentiment_predictions)))
    print('Confussion matrix:')
    print(confusion_matrix(sentiment_test, sentiment_predictions))
    print('ROC-AUC: %0.3f' % (roc_auc_score(sentiment_test, sentiment_predictions)))
# Score the held-out test split and spot-check a single phrase.
PredictionEvaluation(sentiment_test,predictions)
sgd_pipeline.predict(["bezrobocie spada"])[0]
# NOTE(review): predict(tweet_test) is recomputed here although `predictions`
# above already holds the same result.
plot_confusion_matrix(confusion_matrix(sentiment_test, sgd_pipeline.predict(tweet_test)),
classes=['Negative','Positive'],
title='Confusion matrix, without normalization')
# Final tuning parameters by pipeline - SGD Classifier
# based on 'cleaned', but not tokenized tweets
X1 = tweets3['cleaning']
y1 = tweets3['sentiment']
# BUG FIX: the slices below previously read the unrelated globals X/y instead
# of the freshly assigned X1/y1, leaving X1/y1 defined but never used.
X1_train = X1[:14000]
X1_validate = X1[14000:18000]
X1_test = X1[18000:]
y1_train = y1[:14000]
y1_validate = y1[14000:18000]
y1_test = y1[18000:]
# Pipeline: bag-of-words -> tf-idf -> SGD classifier.
pipeline1 = Pipeline([
    ('vect', CountVectorizer(analyzer='word')),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])
parameters1 = {
    'vect__max_df': (0.5, 0.75, 1.0),
    #'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    #'tfidf__use_idf': (True, False),
    #'tfidf__norm': ('l1', 'l2'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l1', 'elasticnet'),
    #'clf__n_iter': (10, 50, 80),
}
# Find the best parameters for both the feature extraction and the classifier
# (grid search over the optimal parameter combination, validation split only).
grid_search1 = GridSearchCV(pipeline1, parameters1, n_jobs=2, verbose=1,
                            scoring='accuracy')
grid_search1.fit(X1_validate, y1_validate)
# The best parameters passed back to the pipeline
print ('The best model results: %0.3f' % grid_search1.best_score_)
best_parameters1 = grid_search1.best_estimator_.get_params()
for param_name in sorted(parameters1.keys()):
    print(param_name, best_parameters1[param_name])
pipeline1.set_params(**best_parameters1)
# Train model on train data X_train and Y_train, then predict labels
# on the held-out test split.
pipeline1.fit(X1_train, y1_train)
y1_pred = pipeline1.predict(X1_test)
# Calculate precision, recall, F1-score
print(metrics.classification_report(y1_test, y1_pred, digits=3))
PredictionEvaluation(y1_test, y1_pred)
pipeline1.predict(["bezrobocie spada w Polsce"])[0]
# Final tuning parameters by pipeline - LinearSVC
# based on 'cleaned', but not tokenized tweets
X2 = tweets3['cleaning']
y2 = tweets3['sentiment']
# BUG FIX: the slices below previously read the unrelated globals X/y instead
# of the freshly assigned X2/y2, leaving X2/y2 defined but never used.
X2_train = X2[:14000]
X2_validate = X2[14000:18000]
X2_test = X2[18000:]
y2_train = y2[:14000]
y2_validate = y2[14000:18000]
y2_test = y2[18000:]
# Pipeline: tf-idf vectorizer -> percentile feature selection -> linear SVM.
pipeline2 = Pipeline([
    ('vect', TfidfVectorizer()),
    ('sel', SelectPercentile()),
    ('clf', LinearSVC()),
])
parameters2 = {
    'vect__max_df': (0.25, 0.5),
    'vect__ngram_range': ((1, 1), (1, 2), (1, 3)),
    'vect__use_idf': (True, False),
    'sel__percentile': (10, 30, 50, 100),
    'clf__C': (0.01, 1, 10),
    'clf__class_weight': ('balanced', None),
}
# Find the best parameters for both the feature extraction and the classifier
# (grid search over the optimal parameter combination, validation split only).
grid_search2 = GridSearchCV(pipeline2, parameters2, n_jobs=2, verbose=1,
                            scoring='accuracy')
# The best parameters passed back to the pipeline
grid_search2.fit(X2_validate, y2_validate)
print('The best model results: %0.3f' % grid_search2.best_score_)
best_parameters2 = grid_search2.best_estimator_.get_params()
for param_name in sorted(parameters2.keys()):
    print(param_name, best_parameters2[param_name])
pipeline2.set_params(**best_parameters2)
# Train model on train data X_train and Y_train, then predict labels
# on the held-out test split.
pipeline2.fit(X2_train, y2_train)
y2_pred = pipeline2.predict(X2_test)
# Calculate precision, recall, F1-score
print(metrics.classification_report(y2_test, y2_pred, digits=3))
PredictionEvaluation(y2_test, y2_pred)
# Final tuning parameters by pipeline - SGD Classifier (only on train, test data set) + cross validation
# based on 'cleaned', but not tokenized tweets
# 80/20 train/test split used for the cross-validation run below.
msg_train, msg_test, label_train, label_test = train_test_split(tweets3['cleaning'], tweets3['sentiment'],
test_size=0.2, random_state=42)
print(len(msg_train), len(msg_test), len(msg_train) + len(msg_test))
pipeline_sgd = Pipeline([
('vect', CountVectorizer(analyzer='word')),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# pipeline parameters to automatically explore and tune
parameters_sgd = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l1', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
results_sgd = []
# 10-fold cross-validated accuracy of the default-parameter pipeline.
cv_results_sgd = cross_val_score(pipeline_sgd,
msg_train, # training data
label_train, # training labels
cv=10, # split data randomly into 10 parts: 9 for training, 1 for scoring or cv=kfold
scoring='accuracy', # scoring metric
n_jobs=-1, # -1 = use all cores = faster
)
results_sgd.append(cv_results_sgd)
#print(cv_results_sgd)
# Summary string "mean (std)" of the cross-validated accuracy.
msg_sgd = "%f (%f)" % (cv_results_sgd.mean(), cv_results_sgd.std())
#print(msg_sgd)
grid_sgd = GridSearchCV(
pipeline_sgd, # pipeline from above
param_grid=parameters_sgd, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1, # number of cores to use for parallelization; -1 for "all cores"
scoring='accuracy', # scores we sre optimizing
cv=StratifiedKFold(label_train, n_folds=5), # type | |
], "url":"Limiter", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Limiter", "name":"Limiter", "params":\n'
' [ \n'
' { "key":"type", "type":"enum", "default":"SoftLimit", "enum":\n'
' [ "SoftLimit", "HardLimit", "SoftClip", "HardClip" ] },\n'
' { "key":"gain-L", "type":"double", "default":0 },\n'
' { "key":"gain-R", "type":"double", "default":0 },\n'
' { "key":"thresh", "type":"double", "default":0 },\n'
' { "key":"hold", "type":"double", "default":0 },\n'
' { "key":"makeup", "type":"enum", "default":"No", "enum":\n'
' [ "No", "Yes" ] } ], "url":"Limiter", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"NyquistPlug-inInstaller", \n'
' "name":"Nyquist Plug-in Installer", "params":\n'
' [ \n'
' { "key":"plug-in", "type":"string", "default":"" } ], \n'
' "url":"Nyquist_Plug-in_Installer", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"RegularIntervalLabels", \n'
' "name":"Regular Interval Labels", "params":\n'
' [ \n'
' { "key":"mode", "type":"enum", "default":"Both", "enum":\n'
' [ "Both", "Number", "Interval" ] },\n'
' { "key":"totalnum", "type":"int", "default":0 },\n'
' { "key":"interval", "type":"double", "default":0 },\n'
' { "key":"region", "type":"double", "default":0 },\n'
' { "key":"adjust", "type":"enum", "default":"No", "enum":\n'
' [ "No", "Yes" ] },\n'
' { "key":"labeltext", "type":"string", "default":"" },\n'
' { "key":"zeros", "type":"enum", "default":"TextOnly", "enum":\n'
' [ "TextOnly", "OneBefore", "TwoBefore", "ThreeBefore", '
'"OneAfter", "TwoAfter", "ThreeAfter" ] },\n'
' { "key":"firstnum", "type":"int", "default":0 },\n'
' { "key":"verbose", "type":"enum", "default":"Details", "enum":\n'
' [ "Details", "Warnings", "None" ] } ], \n'
' "url":"Regular_Interval_Labels", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"RegularIntervalLabels", \n'
' "name":"Regular Interval Labels", "params":\n'
' [ \n'
' { "key":"mode", "type":"enum", "default":"Both", "enum":\n'
' [ "Both", "Number", "Interval" ] },\n'
' { "key":"totalnum", "type":"int", "default":0 },\n'
' { "key":"interval", "type":"double", "default":0 },\n'
' { "key":"region", "type":"double", "default":0 },\n'
' { "key":"adjust", "type":"enum", "default":"No", "enum":\n'
' [ "No", "Yes" ] },\n'
' { "key":"labeltext", "type":"string", "default":"" },\n'
' { "key":"zeros", "type":"enum", "default":"TextOnly", "enum":\n'
' [ "TextOnly", "OneBefore", "TwoBefore", "ThreeBefore", '
'"OneAfter", "TwoAfter", "ThreeAfter" ] },\n'
' { "key":"firstnum", "type":"int", "default":0 },\n'
' { "key":"verbose", "type":"enum", "default":"Details", "enum":\n'
' [ "Details", "Warnings", "None" ] } ], \n'
' "url":"Regular_Interval_Labels", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SampleDataExport", \n'
' "name":"Sample Data Export", "params":\n'
' [ \n'
' { "key":"number", "type":"int", "default":0 },\n'
' { "key":"units", "type":"enum", "default":"dB", "enum":\n'
' [ "dB", "Linear" ] },\n'
' { "key":"filename", "type":"string", "default":"" },\n'
' { "key":"fileformat", "type":"enum", "default":"None", "enum":\n'
' [ "None", "Count", "Time" ] },\n'
' { "key":"header", "type":"enum", "default":"None", "enum":\n'
' [ "None", "Minimal", "Standard", "All" ] },\n'
' { "key":"optext", "type":"string", "default":"" },\n'
' { "key":"channel-layout", "type":"enum", "default":"SameLine", '
'"enum":\n'
' [ "SameLine", "Alternate", "LFirst" ] },\n'
' { "key":"messages", "type":"enum", "default":"Yes", "enum":\n'
' [ "Yes", "Errors", "None" ] } ], \n'
' "url":"Sample_Data_Export", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SampleDataExport", \n'
' "name":"Sample Data Export", "params":\n'
' [ \n'
' { "key":"number", "type":"int", "default":0 },\n'
' { "key":"units", "type":"enum", "default":"dB", "enum":\n'
' [ "dB", "Linear" ] },\n'
' { "key":"filename", "type":"string", "default":"" },\n'
' { "key":"fileformat", "type":"enum", "default":"None", "enum":\n'
' [ "None", "Count", "Time" ] },\n'
' { "key":"header", "type":"enum", "default":"None", "enum":\n'
' [ "None", "Minimal", "Standard", "All" ] },\n'
' { "key":"optext", "type":"string", "default":"" },\n'
' { "key":"channel-layout", "type":"enum", "default":"SameLine", '
'"enum":\n'
' [ "SameLine", "Alternate", "LFirst" ] },\n'
' { "key":"messages", "type":"enum", "default":"Yes", "enum":\n'
' [ "Yes", "Errors", "None" ] } ], \n'
' "url":"Sample_Data_Export", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SampleDataImport", \n'
' "name":"Sample Data Import", "params":\n'
' [ \n'
' { "key":"filename", "type":"string", "default":"" },\n'
' { "key":"bad-data", "type":"enum", "default":"ThrowError", '
'"enum":\n'
' [ "ThrowError", "ReadAsZero" ] } ], \n'
' "url":"Sample_Data_Import", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SampleDataImport", \n'
' "name":"Sample Data Import", "params":\n'
' [ \n'
' { "key":"filename", "type":"string", "default":"" },\n'
' { "key":"bad-data", "type":"enum", "default":"ThrowError", '
'"enum":\n'
' [ "ThrowError", "ReadAsZero" ] } ], \n'
' "url":"Sample_Data_Import", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"StudioFadeOut", \n'
' "name":"Studio Fade Out", "params":\n'
' [ ], \n'
' "url":"Fades#studio_fadeout", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"StudioFadeOut", \n'
' "name":"Studio Fade Out", "params":\n'
' [ ], \n'
' "url":"Fades#studio_fadeout", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Tremolo", "name":"Tremolo", "params":\n'
' [ \n'
' { "key":"wave", "type":"enum", "default":"Sine", "enum":\n'
' [ "Sine", "Triangle", "Sawtooth", \n'
' "InverseSawtooth", "Square" ] },\n'
' { "key":"phase", "type":"int", "default":0 },\n'
' { "key":"wet", "type":"int", "default":0 },\n'
' { "key":"lfo", "type":"double", "default":0 } ], '
'"url":"Tremolo", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Tremolo", "name":"Tremolo", "params":\n'
' [ \n'
' { "key":"wave", "type":"enum", "default":"Sine", "enum":\n'
' [ "Sine", "Triangle", "Sawtooth", \n'
' "InverseSawtooth", "Square" ] },\n'
' { "key":"phase", "type":"int", "default":0 },\n'
' { "key":"wet", "type":"int", "default":0 },\n'
' { "key":"lfo", "type":"double", "default":0 } ], '
'"url":"Tremolo", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"VocalRemover", "name":"Vocal Remover", "params":\n'
' [ \n'
' { "key":"action", "type":"enum", "default":"Remove Vocals", '
'"enum":\n'
' [ "Remove Vocals", "View Help" ] },\n'
' { "key":"band-choice", "type":"enum", "default":"Simple", '
'"enum":\n'
' [ "Simple", "Remove", "Retain" ] },\n'
' { "key":"low-range", "type":"double", "default":0 },\n'
' { "key":"high-range", "type":"double", "default":0 } ], '
'"url":"Vocal_Remover", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"VocalRemover", "name":"Vocal Remover", "params":\n'
' [ \n'
' { "key":"action", "type":"enum", "default":"Remove Vocals", '
'"enum":\n'
' [ "Remove Vocals", "View Help" ] },\n'
' { "key":"band-choice", "type":"enum", "default":"Simple", '
'"enum":\n'
' [ "Simple", "Remove", "Retain" ] },\n'
' { "key":"low-range", "type":"double", "default":0 },\n'
' { "key":"high-range", "type":"double", "default":0 } ], '
'"url":"Vocal_Remover", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"RissetDrum", "name":"<NAME>", "params":\n'
' [ \n'
' { "key":"freq", "type":"double", "default":0 },\n'
' { "key":"decay", "type":"double", "default":0 },\n'
' { "key":"cf", "type":"double", "default":0 },\n'
' { "key":"bw", "type":"double", "default":0 },\n'
' { "key":"noise", "type":"double", "default":0 },\n'
' { "key":"gain", "type":"double", "default":0 } ], '
'"url":"Risset_Drum", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"RissetDrum", "name":"<NAME>", "params":\n'
' [ \n'
' { "key":"freq", "type":"double", "default":0 },\n'
' { "key":"decay", "type":"double", "default":0 },\n'
' { "key":"cf", "type":"double", "default":0 },\n'
' { "key":"bw", "type":"double", "default":0 },\n'
' { "key":"noise", "type":"double", "default":0 },\n'
' { "key":"gain", "type":"double", "default":0 } ], '
'"url":"Risset_Drum", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"CompareAudio", "name":"Compare Audio", "params":\n'
' [ \n'
' { "key":"Threshold", "type":"float", "default":0 } ], \n'
' "url":"Extra_Menu:_Scriptables_II#compare_Audio", \n'
' "tip":"Compares a range on two tracks." },\n'
' { "id":"Demo", "name":"Demo", "params":\n'
' [ \n'
' { "key":"Delay", "type":"float", "default":1 },\n'
' { "key":"Decay", "type":"float", "default":0.5 } ], \n'
' "url":"Extra_Menu:_Scriptables_I", \n'
' "tip":"Does the demo action." },\n'
' { "id":"Drag", "name":"Drag", "params":\n'
' [ \n'
' { "key":"Id", "type":"int", "default":"unchanged" },\n'
' { "key":"Window", "type":"string", "default":"unchanged" },\n'
' { "key":"FromX", "type":"double", "default":"unchanged" },\n'
' { "key":"FromY", "type":"double", "default":"unchanged" },\n'
' { "key":"ToX", "type":"double", "default":"unchanged" },\n'
' { "key":"ToY", "type":"double", "default":"unchanged" },\n'
' { "key":"RelativeTo", "type":"enum", "default":"unchanged", '
'"enum":\n'
' [ "Panel", "App", "Track0", "Track1" ] } ], \n'
' "url":"Extra_Menu:_Scriptables_II#move_mouse", \n'
' "tip":"Drags mouse from one place to another." },\n'
' { "id":"Export2", "name":"Export2", "params":\n'
' [ \n'
' { "key":"Filename", | |
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
linegen
~~~~~~~
An advanced line generation tool using Pango for proper text shaping. The
actual drawing code was adapted from the create_image utility from nototools
available at [0].
Line degradation uses a local model described in [1].
[0] https://github.com/googlei18n/nototools
[1] <NAME>, et al. "A statistical, nonparametric methodology for document degradation model validation." IEEE Transactions on Pattern Analysis and Machine Intelligence 22.11 (2000): 1209-1223.
"""
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.measurements import find_objects
from scipy.ndimage.morphology import distance_transform_cdt, binary_closing
from scipy.ndimage.interpolation import affine_transform, geometric_transform
from PIL import Image, ImageOps
from typing import AnyStr
import logging
import ctypes
import ctypes.util
import numpy as np
from kraken.lib.exceptions import KrakenCairoSurfaceException
from kraken.lib.util import pil2array, array2pil
logger = logging.getLogger(__name__)
# Locate the native pangocairo/pango/cairo shared libraries; all three are
# hard requirements for the line generator, so fail early with an
# actionable message if any is missing.
pc_lib = ctypes.util.find_library('pangocairo-1.0')
p_lib = ctypes.util.find_library('pango-1.0')
c_lib = ctypes.util.find_library('cairo')
if pc_lib is None:
    raise ImportError('Couldnt load pangocairo line generator dependency. Please install pangocairo, pango, and cairo.')
if p_lib is None:
    raise ImportError('Couldnt load pango line generator dependency. Please install pangocairo, pango, and cairo.')
if c_lib is None:
    raise ImportError('Couldnt load cairo line generator dependency. Please install pangocairo, pango, and cairo.')
# Load the located libraries; all later C calls go through these handles.
pangocairo = ctypes.CDLL(pc_lib)
pango = ctypes.CDLL(p_lib)
cairo = ctypes.CDLL(c_lib)
__all__ = ['LineGenerator', 'ocropy_degrade', 'degrade_line', 'distort_line']
class CairoSurface(ctypes.Structure):
    """Opaque handle for a C ``cairo_surface_t``."""


class CairoContext(ctypes.Structure):
    """Opaque handle for a C ``cairo_t`` drawing context."""


class PangoFontDescription(ctypes.Structure):
    """Opaque handle for a C ``PangoFontDescription``."""


class PangoLanguage(ctypes.Structure):
    """Opaque handle for a C ``PangoLanguage``."""


class PangoLayout(ctypes.Structure):
    """Opaque handle for a C ``PangoLayout``."""


class PangoContext(ctypes.Structure):
    """Opaque handle for a C ``PangoContext``."""


class PangoRectangle(ctypes.Structure):
    """Mirror of the C ``PangoRectangle`` struct holding pixel extents."""
    _fields_ = [('x', ctypes.c_int),
                ('y', ctypes.c_int),
                ('width', ctypes.c_int),
                ('height', ctypes.c_int)]
class ensureBytes(object):
    """
    Argument converter guaranteeing that ``char *`` parameters receive bytes.

    Used as a ctypes ``argtypes`` entry: ``str`` arguments are encoded to
    UTF-8, ``bytes`` pass through untouched.
    """
    @classmethod
    def from_param(cls, value: AnyStr) -> bytes:
        if isinstance(value, bytes):
            return value
        return value.encode('utf-8')
# Declare argument/return types for the C entry points used below so ctypes
# marshals pointers and ints correctly (the default return type is c_int).
# -- cairo: context and surface lifecycle, painting --
cairo.cairo_create.argtypes = [ctypes.POINTER(CairoSurface)]
cairo.cairo_create.restype = ctypes.POINTER(CairoContext)
cairo.cairo_destroy.argtypes = [ctypes.POINTER(CairoContext)]
cairo.cairo_image_surface_create.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
cairo.cairo_image_surface_create.restype = ctypes.POINTER(CairoSurface)
cairo.cairo_surface_destroy.argtypes = [ctypes.POINTER(CairoSurface)]
cairo.cairo_image_surface_get_data.restype = ctypes.c_void_p
cairo.cairo_set_source_rgb.argtypes = [ctypes.POINTER(CairoContext), ctypes.c_double, ctypes.c_double, ctypes.c_double]
cairo.cairo_paint.argtypes = [ctypes.POINTER(CairoContext)]
# -- pangocairo: bridging a cairo context with a pango layout --
pangocairo.pango_cairo_create_context.argtypes = [ctypes.POINTER(CairoContext)]
pangocairo.pango_cairo_create_context.restype = ctypes.POINTER(PangoContext)
pangocairo.pango_cairo_update_layout.argtypes = [ctypes.POINTER(CairoContext), ctypes.POINTER(PangoLayout)]
pangocairo.pango_cairo_show_layout.argtypes = [ctypes.POINTER(CairoContext), ctypes.POINTER(PangoLayout)]
# -- pango: language, font description and layout handling --
# ensureBytes lets these accept either str or bytes for char* parameters.
pango.pango_language_from_string.argtypes = [ensureBytes]  # type: ignore
pango.pango_language_from_string.restype = ctypes.POINTER(PangoLanguage)
pango.pango_context_set_language.argtypes = [ctypes.POINTER(PangoContext), ctypes.POINTER(PangoLanguage)]
pango.pango_font_description_new.restype = ctypes.POINTER(PangoFontDescription)
pango.pango_font_description_set_family.argtypes = [ctypes.POINTER(PangoFontDescription), ensureBytes]  # type: ignore
pango.pango_font_description_set_size.argtypes = [ctypes.POINTER(PangoFontDescription), ctypes.c_int]
pango.pango_font_description_set_weight.argtypes = [ctypes.POINTER(PangoFontDescription), ctypes.c_uint]
pango.pango_layout_new.restype = ctypes.POINTER(PangoLayout)
pango.pango_layout_set_markup.argtypes = [ctypes.POINTER(PangoLayout), ensureBytes, ctypes.c_int]  # type: ignore
pango.pango_layout_set_font_description.argtypes = [ctypes.POINTER(PangoLayout), ctypes.POINTER(PangoFontDescription)]
pango.pango_layout_get_context.argtypes = [ctypes.POINTER(PangoLayout)]
pango.pango_layout_get_context.restype = ctypes.POINTER(PangoContext)
pango.pango_layout_get_pixel_extents.argtypes = [ctypes.POINTER(PangoLayout), ctypes.POINTER(PangoRectangle), ctypes.POINTER(PangoRectangle)]
class LineGenerator(object):
    """
    Produces degraded line images using a single collection of font families.
    """
    def __init__(self, family='Sans', font_size=32, font_weight=400, language=None):
        self.language = language
        self.font = pango.pango_font_description_new()
        # XXX: get PANGO_SCALE programatically from somewhere
        logger.debug('Setting font {}, size {}, weight {}'.format(family, font_size, font_weight))
        # 1024 is PANGO_SCALE (pango units per point), hard-coded here.
        pango.pango_font_description_set_size(self.font, font_size * 1024)
        pango.pango_font_description_set_family(self.font, family)
        pango.pango_font_description_set_weight(self.font, font_weight)

    def render_line(self, text):
        """
        Draws a line onto a Cairo surface which will be converted to a pillow
        Image.

        Args:
            text (unicode): A string which will be rendered as a single line.

        Returns:
            PIL.Image of mode 'L'.

        Raises:
            KrakenCairoSurfaceException if the Cairo surface couldn't be
            created (usually caused by invalid dimensions).
        """
        logger.info('Rendering line \'{}\''.format(text))
        # First pass: draw on a throwaway 0x0 surface purely to measure the
        # pixel extents of the typeset text.
        logger.debug('Creating temporary cairo surface')
        temp_surface = cairo.cairo_image_surface_create(0, 0, 0)
        width, height = _draw_on_surface(temp_surface, self.font, self.language, text)
        cairo.cairo_surface_destroy(temp_surface)
        if width == 0 or height == 0:
            logger.error('Surface for \'{}\' zero pixels in at least one dimension'.format(text))
            raise KrakenCairoSurfaceException('Surface zero pixels in at least one dimension', width, height)
        # Second pass: render again on a surface sized to the measured extents.
        logger.debug('Creating sized cairo surface')
        real_surface = cairo.cairo_image_surface_create(0, width, height)
        _draw_on_surface(real_surface, self.font, self.language, text)
        logger.debug('Extracing data from real surface')
        data = cairo.cairo_image_surface_get_data(real_surface)
        size = int(4 * width * height)  # 4 bytes per pixel (32-bit ARGB)
        # Copy the pixel data out of the C buffer before the surface is freed.
        buffer = ctypes.create_string_buffer(size)
        ctypes.memmove(buffer, data, size)
        logger.debug('Loading data into PIL image')
        im = Image.frombuffer("RGBA", (width, height), buffer, "raw", "BGRA", 0, 1)
        cairo.cairo_surface_destroy(real_surface)
        logger.debug('Expand and grayscale image')
        # Grayscale the rendering and add a 5-pixel white border on all sides.
        im = im.convert('L')
        im = ImageOps.expand(im, 5, 255)
        return im
def _draw_on_surface(surface, font, language, text):
    """Typeset ``text`` (pango markup) onto ``surface`` in black on white and
    return the pixel extents ``(width, height)`` of the rendered layout."""
    logger.debug('Creating cairo and pangocairo contexts')
    cr = cairo.cairo_create(surface)
    pangocairo_ctx = pangocairo.pango_cairo_create_context(cr)
    logger.debug('Creating pangocairo layout')
    layout = pango.pango_layout_new(pangocairo_ctx)
    pango_ctx = pango.pango_layout_get_context(layout)
    if language is not None:
        logger.debug('Setting language {} on context'.format(language))
        pango_language = pango.pango_language_from_string(language)
        pango.pango_context_set_language(pango_ctx, pango_language)
    logger.debug('Setting font description on layout')
    pango.pango_layout_set_font_description(layout, font)
    # White background fill before drawing the text.
    logger.debug('Filling background of surface')
    cairo.cairo_set_source_rgb(cr, 1.0, 1.0, 1.0)
    cairo.cairo_paint(cr)
    logger.debug('Typsetting text')
    # -1 lets pango determine the markup length from the NUL terminator.
    pango.pango_layout_set_markup(layout, text, -1)
    logger.debug('Drawing text')
    cairo.cairo_set_source_rgb(cr, 0.0, 0.0, 0.0)
    pangocairo.pango_cairo_update_layout(cr, layout)
    pangocairo.pango_cairo_show_layout(cr, layout)
    cairo.cairo_destroy(cr)
    logger.debug('Getting pixel extents')
    # Report the larger of ink and logical extents in each dimension.
    ink_rect = PangoRectangle()
    logical_rect = PangoRectangle()
    pango.pango_layout_get_pixel_extents(layout, ctypes.byref(ink_rect), ctypes.byref(logical_rect))
    return max(ink_rect.width, logical_rect.width), max(ink_rect.height, logical_rect.height)
def ocropy_degrade(im, distort=1.0, dsigma=20.0, eps=0.03, delta=0.3, degradations=((0.5, 0.0, 0.5, 0.0),)):
    """
    Degrades and distorts a line using the same noise model used by ocropus.

    Args:
        im (PIL.Image): Input image
        distort (float): magnitude of the geometric distortion (0 disables it)
        dsigma (float): smoothing sigma of the distortion displacement fields
        eps (float): scale of the random affine perturbation
        delta (float): scale of the random translation offset
        degradations (sequence): sequence of 4-tuples (sigma, ssigma,
            threshold, sthreshold) corresponding to the degradations
            argument of ocropus-linegen. One tuple is chosen at random.

    Returns:
        PIL.Image in mode 'L'
    """
    # BUG FIX: the previous default ((0.5, 0.0, 0.5, 0.0)) was a flat tuple
    # of four floats, so degradations[choice] yielded a single float and the
    # 4-way unpack below raised TypeError. It must be a sequence OF 4-tuples.
    w, h = im.size
    # XXX: determine correct output shape from transformation matrices instead
    # of guesstimating.
    logger.debug('Pasting source image into canvas')
    image = Image.new('L', (int(1.5*w), 4*h), 255)
    image.paste(im, (int((image.size[0] - w) / 2), int((image.size[1] - h) / 2)))
    a = pil2array(image.convert('L'))
    logger.debug('Selecting degradations')
    (sigma, ssigma, threshold, sthreshold) = degradations[np.random.choice(len(degradations))]
    # Jitter the chosen parameters uniformly in [-ssigma, ssigma] etc.
    sigma += (2 * np.random.rand() - 1) * ssigma
    threshold += (2 * np.random.rand() - 1) * sthreshold
    a = a * 1.0 / np.amax(a)
    if sigma > 0.0:
        logger.debug('Apply Gaussian filter')
        a = gaussian_filter(a, sigma)
    logger.debug('Adding noise')
    a += np.clip(np.random.randn(*a.shape) * 0.2, -0.25, 0.25)
    logger.debug('Perform affine transformation and resize')
    m = np.array([[1 + eps * np.random.randn(), 0.0], [eps * np.random.randn(), 1.0 + eps * np.random.randn()]])
    w, h = a.shape
    c = np.array([w / 2.0, h / 2])
    d = c - np.dot(m, c) + np.array([np.random.randn() * delta, np.random.randn() * delta])
    a = affine_transform(a, m, offset=d, order=1, mode='constant', cval=a[0, 0])
    a = np.array(a > threshold, 'f')
    # Crop to the bounding box of the (single) dark component plus a margin.
    # NOTE(review): assumes exactly one connected object and a bounding box at
    # least 5px from the canvas edge (negative indices would wrap) — the large
    # canvas pasted above makes this safe in practice.
    [[r, c]] = find_objects(np.array(a == 0, 'i'))
    r0 = r.start
    r1 = r.stop
    c0 = c.start
    c1 = c.stop
    a = a[r0 - 5:r1 + 5, c0 - 5:c1 + 5]
    if distort > 0:
        logger.debug('Perform geometric transformation')
        h, w = a.shape
        hs = np.random.randn(h, w)
        ws = np.random.randn(h, w)
        hs = gaussian_filter(hs, dsigma)
        ws = gaussian_filter(ws, dsigma)
        hs *= distort / np.amax(hs)
        ws *= distort / np.amax(ws)

        def _f(p):
            # Displace each output pixel by the smoothed random fields.
            return (p[0] + hs[p[0], p[1]], p[1] + ws[p[0], p[1]])
        a = geometric_transform(a, _f, output_shape=(h, w), order=1, mode='constant', cval=np.amax(a))
    im = array2pil(a).convert('L')
    return im
def degrade_line(im, eta=0.0, alpha=1.5, beta=1.5, alpha_0=1.0, beta_0=1.0):
    """
    Degrades a line image by adding noise.

    Pixels near the foreground/background boundary are flipped with a
    probability that decays with squared distance from the boundary.
    For parameter meanings consult [1].

    Args:
        im (PIL.Image): Input image
        eta (float): base flip probability added everywhere
        alpha (float): decay rate of the flip-to-white probability
        beta (float): decay rate of the flip-to-black probability
        alpha_0 (float): amplitude of the flip-to-white probability
        beta_0 (float): amplitude of the flip-to-black probability

    Returns:
        PIL.Image in mode '1'
    """
    logger.debug('Inverting and normalizing input image')
    arr = pil2array(im)
    arr = np.amax(arr) - arr
    arr = arr * 1.0 / np.amax(arr)
    # NOTE: keep the order of the random draws (fg before bg) — it fixes the
    # consumption of the global numpy RNG stream.
    logger.debug('Calculating foreground distance transform')
    dist_fg = distance_transform_cdt(1 - arr, metric='taxicab')
    logger.debug('Calculating flip to white probability')
    prob_fg = alpha_0 * np.exp(-alpha * (dist_fg ** 2)) + eta
    prob_fg[arr == 1] = 0
    flip_fg = np.random.binomial(1, prob_fg)
    logger.debug('Calculating background distance transform')
    dist_bg = distance_transform_cdt(arr, metric='taxicab')
    logger.debug('Calculating flip to black probability')
    prob_bg = beta_0 * np.exp(-beta * (dist_bg ** 2)) + eta
    prob_bg[arr == 0] = 0
    flip_bg = np.random.binomial(1, prob_bg)
    logger.debug('Flipping')
    arr = arr - flip_bg + flip_fg
    logger.debug('Binary closing')
    arr = binary_closing(arr, np.array([[1, 1], [1, 1]]))
    logger.debug('Converting to image')
    return array2pil(255 - arr.astype('B') * 255)
def distort_line(im, distort=3.0, sigma=10, eps=0.03, delta=0.3):
"""
Distorts a line image.
Run BEFORE degrade_line as a white border of 5 pixels will be added.
Args:
im (PIL.Image): Input image
distort (float):
sigma (float):
eps (float):
delta (float):
Returns:
PIL.Image in mode 'L'
"""
w, h = im.size
# XXX: determine correct output shape from transformation matrices instead
# of guesstimating.
logger.debug('Pasting source image into canvas')
image = Image.new('L', (int(1.5*w), 4*h), 255)
image.paste(im, (int((image.size[0] - w) / 2), int((image.size[1] - h) / 2)))
line = pil2array(image.convert('L'))
# shear in y direction with factor eps * randn(), scaling with 1 + eps *
# randn() in x/y axis (all offset at d)
logger.debug('Performing affine transformation')
m = np.array([[1 + eps * np.random.randn(), 0.0], [eps * np.random.randn(), 1.0 + eps * np.random.randn()]])
c = np.array([w/2.0, h/2])
d = c - np.dot(m, c) + np.array([np.random.randn() * delta, np.random.randn() * delta])
line = affine_transform(line, m, offset=d, | |
constant?
plc = 0x33
def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
    """Build the message from keypad serial, sequence and base-station serial."""
    # Base-station serial is carried in the footer (packed by footer_body).
    self.bs_sn = bs_sn
    super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)
@classmethod
def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
    """Narrow a generic parsed message to this concrete class.

    Raises InvalidMessageBytesError when any header field does not match
    this class's constants; otherwise unpacks the base-station serial from
    the footer and rebuilds the message as this class.
    """
    if msg.plc != cls.plc:
        raise InvalidMessageBytesError
    if msg.msg_type != cls.msg_type:
        raise InvalidMessageBytesError
    if msg.info_type != cls.info_type:
        raise InvalidMessageBytesError
    if msg.event_type != cls.event_type:
        raise InvalidMessageBytesError
    if msg.payload_body != cls.payload_body:
        raise InvalidMessageBytesError
    bs_sn = SerialNumberFormat.unpack(SerialNumberFormat.HEX_5B6C, msg.footer_body)
    return cls(msg.sn, msg.sequence, bs_sn)
class BaseStationKeypadAwayResponse(BaseStationKeypadMessage, BaseStationKeypadResponseTrait, BaseStationKeypadStatusMessageTrait):
    """Base-station response to a keypad AWAY (arm) request.

    Fixes vs. previous revision:
    - factory() compared ``msg.info_type != msg.info_type`` (always False, so
      the info_type check never fired); it now checks against ``cls.info_type``.
    - hard-coded 0x33 literals replaced with the ``plc`` class attribute, for
      consistency with the sibling message classes.
    """
    event_type = KeypadMessage.EventType.AWAY_REQUEST
    payload_body = bytes([0x78])  # TODO: why constant?
    plc = 0x33

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
        # Base-station serial is packed into the footer by footer_body.
        self.bs_sn = bs_sn
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        if msg.plc != cls.plc:
            raise InvalidMessageBytesError
        if msg.msg_type != cls.msg_type:
            raise InvalidMessageBytesError
        if msg.info_type != cls.info_type:
            raise InvalidMessageBytesError
        if msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        if msg.payload_body != cls.payload_body:
            raise InvalidMessageBytesError
        bs_sn = SerialNumberFormat.unpack(SerialNumberFormat.HEX_5B6C, msg.footer_body)
        return cls(msg.sn, msg.sequence, bs_sn)
class BaseStationKeypadOffRemoteUpdate(BaseStationKeypadMessage, BaseStationKeypadUpdateTrait, BaseStationKeypadStatusMessageTrait):
    """Update broadcast when the system is turned off remotely.

    Consistency fix: hard-coded 0x33 literals replaced with the ``plc`` class
    attribute, matching the sibling message classes.
    """
    event_type = KeypadMessage.EventType.OFF_REMOTE_UPDATE
    payload_body = bytes([0xFF])  # TODO: why constant?
    plc = 0x33

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
        # Base-station serial is packed into the footer by footer_body.
        self.bs_sn = bs_sn
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        if msg.plc != cls.plc:
            raise InvalidMessageBytesError
        if msg.msg_type != cls.msg_type:
            raise InvalidMessageBytesError
        if msg.info_type != cls.info_type:
            raise InvalidMessageBytesError
        if msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        if msg.payload_body != cls.payload_body:
            raise InvalidMessageBytesError
        bs_sn = SerialNumberFormat.unpack(SerialNumberFormat.HEX_5B6C, msg.footer_body)
        return cls(msg.sn, msg.sequence, bs_sn)
class BaseStationKeypadEnterMenuResponse(BaseStationKeypadMessage, BaseStationKeypadResponseTrait, BaseStationKeypadMenuMessageTrait):
    """Base-station response acknowledging a keypad "enter menu" request.

    Consistency fix: ``__init__`` previously passed a hard-coded 0x33 to the
    superclass; it now uses ``self.plc`` like the sibling classes.
    """
    plc = 0x33
    event_type = KeypadMessage.EventType.ENTER_MENU_REQUEST
    payload_body = bytes([0x01])  # TODO: why constant?

    def __init__(self, kp_sn: str, sequence: int):
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        if msg.plc != cls.plc:
            raise InvalidMessageBytesError
        if msg.msg_type != cls.msg_type:
            raise InvalidMessageBytesError
        if msg.info_type != cls.info_type:
            raise InvalidMessageBytesError
        if msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        if msg.footer_body != cls.footer_body:
            raise InvalidMessageBytesError
        if msg.payload_body != cls.payload_body:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence)
class BaseStationKeypadNewPrefixResponse(BaseStationKeypadMessage, BaseStationKeypadResponseTrait, BaseStationKeypadMenuMessageTrait):
    """Base-station response accepting a new keypad PIN prefix."""
    event_type = KeypadMessage.EventType.NEW_PREFIX_REQUEST
    payload_body = bytes([0x00])  # TODO: See if keypad responds to other values (guessing anything other than 0x00 is "not accepted")
    plc = 0x33

    def __init__(self, kp_sn: str, sequence: int):
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        matches = (msg.plc == cls.plc
                   and msg.msg_type == cls.msg_type
                   and msg.info_type == cls.info_type
                   and msg.event_type == cls.event_type
                   and msg.footer_body == cls.footer_body
                   and msg.payload_body == cls.payload_body)
        if not matches:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence)
class BaseStationKeypadRemoveComponentSelectMenuResponse(BaseStationKeypadMessage, BaseStationKeypadResponseTrait, BaseStationKeypadMenuMessageTrait):
    """Base-station response to the "remove component: select" menu step."""
    event_type = KeypadMessage.EventType.REMOVE_COMPONENT_SELECT_MENU_REQUEST
    payload_body = bytes([0x00])  # TODO: why constant?
    plc = 0x33

    def __init__(self, kp_sn: str, sequence: int):
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        matches = (msg.plc == cls.plc
                   and msg.msg_type == cls.msg_type
                   and msg.info_type == cls.info_type
                   and msg.event_type == cls.event_type
                   and msg.footer_body == cls.footer_body
                   and msg.payload_body == cls.payload_body)
        if not matches:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence)
class BaseStationKeypadRemoveComponentConfirmMenuResponse(BaseStationKeypadMessage, BaseStationKeypadResponseTrait, BaseStationKeypadMenuMessageTrait):
    """Base-station response to the "remove component: confirm" menu step."""
    event_type = KeypadMessage.EventType.REMOVE_COMPONENT_CONFIRM_MENU_REQUEST
    payload_body = bytes([0x00])  # TODO: why constant?
    plc = 0x33

    def __init__(self, kp_sn: str, sequence: int):
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, self.event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        matches = (msg.plc == cls.plc
                   and msg.msg_type == cls.msg_type
                   and msg.info_type == cls.info_type
                   and msg.event_type == cls.event_type
                   and msg.footer_body == cls.footer_body
                   and msg.payload_body == cls.payload_body)
        if not matches:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence)
class BaseStationKeypadAddComponentSerialMenuResponse(BaseStationKeypadMessage, BaseStationKeypadResponseTrait, BaseStationKeypadMenuMessageTrait):
    """Base class for responses to "add component" serial-number menu entries.

    The one-byte payload encodes whether the component was added or was
    already known; concrete per-component subclasses fix the event_type.
    """
    plc = 0x33

    class ResponseType(UniqueIntEnum):
        COMPONENT_ADDED = 0x00
        COMPONENT_ALREADY_ADDED = 0x01

    def __init__(self, kp_sn: str, sequence: int, event_type: KeypadMessage.EventType, response_type: 'BaseStationKeypadAddComponentSerialMenuResponse.ResponseType'):
        # Must be set before super().__init__, which reads the payload_body
        # property (derived from response_type).
        self.response_type = response_type
        super().__init__(self.plc, kp_sn, sequence, self.msg_type, self.info_type, event_type, self.payload_body, self.footer_body)

    def __str__(self):
        s = super().__str__()
        s += 'Response Type: ' + self.response_type.__class__.key(self.response_type) + "\n"
        return s

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch.

        Unlike sibling factories, event_type is NOT checked here — it is
        carried through so subclasses can dispatch on it via from_parent().
        """
        if msg.plc != cls.plc:
            raise InvalidMessageBytesError
        if msg.msg_type != cls.msg_type:
            raise InvalidMessageBytesError
        if msg.info_type != cls.info_type:
            raise InvalidMessageBytesError
        if msg.footer_body != cls.footer_body:
            raise InvalidMessageBytesError
        # Raises ValueError for payload bytes outside the known ResponseTypes.
        response_type = cls.ResponseType(msg.payload_body[0])
        msg = cls(msg.sn, msg.sequence, msg.event_type, response_type)
        if recurse:
            msg = cls.from_parent(msg)
        return msg

    @property
    def payload_body(self):
        # Payload is derived from response_type rather than stored.
        return bytes([self.response_type])

    @payload_body.setter
    def payload_body(self, value):
        # Superclass __init__ assigns payload_body; accept only the value
        # that the getter would produce so the two cannot diverge.
        if value != self.payload_body:
            raise ValueError
class BaseStationKeypadAddEntrySensorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add entry sensor" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_ENTRY_SENSOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddMotionSensorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add motion sensor" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_MOTION_SENSOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddPanicButtonMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add panic button" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_PANIC_BUTTON_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddKeychainRemoteMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add keychain remote" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_KEYCHAIN_REMOTE_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddGlassbreakSensorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add glassbreak sensor" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_GLASSBREAK_SENSOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddSmokeDetectorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add smoke detector" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_SMOKE_DETECTOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddCoDetectorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add CO detector" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_CO_DETECTOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddFreezeSensorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add freeze sensor" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_FREEZE_SENSOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadAddWaterSensorMenuResponse(BaseStationKeypadAddComponentSerialMenuResponse):
    """Response to an "add water sensor" serial-number menu entry."""
    event_type = KeypadMessage.EventType.ADD_WATER_SENSOR_MENU_REQUEST

    def __init__(self, kp_sn: str, sequence: int, response_type):
        super().__init__(kp_sn, sequence, self.event_type, response_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadAddComponentSerialMenuResponse, recurse: bool=True):
        """Narrow the parent response when its event type matches this class."""
        if msg.event_type == cls.event_type:
            return cls(msg.sn, msg.sequence, msg.response_type)
        raise InvalidMessageBytesError
class BaseStationKeypadSimpleMessageTrait:
    """Mixin for short fixed-format messages: PLC 0x22 with an empty payload."""
    plc = 0x22
    payload_body = bytes()
class BaseStationKeypadSimpleStatusMessage(BaseStationKeypadMessage, BaseStationKeypadSimpleMessageTrait, BaseStationKeypadStatusMessageTrait):
    """Status message with empty payload; footer carries the base-station serial."""

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str, msg_type, event_type: KeypadMessage.EventType):
        self.bs_sn = bs_sn
        super().__init__(self.plc, kp_sn, sequence, msg_type, self.info_type, event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        if (msg.plc != cls.plc
                or msg.payload_body != cls.payload_body
                or msg.info_type != cls.info_type):
            raise InvalidMessageBytesError
        bs_sn = SerialNumberFormat.unpack(SerialNumberFormat.HEX_5B6C, msg.footer_body)
        result = cls(msg.sn, msg.sequence, bs_sn, msg.msg_type, msg.event_type)
        return cls.from_parent(result) if recurse else result
class BaseStationKeypadSimpleMenuMessage(BaseStationKeypadMessage, BaseStationKeypadSimpleMessageTrait, BaseStationKeypadMenuMessageTrait):
    """Menu message with empty payload and no base-station serial."""

    def __init__(self, kp_sn: str, sequence: int, msg_type, event_type: KeypadMessage.EventType):
        super().__init__(self.plc, kp_sn, sequence, msg_type, self.info_type, event_type, self.payload_body, self.footer_body)

    @classmethod
    def factory(cls, msg: BaseStationKeypadMessage, recurse: bool=True):
        """Narrow a generic parsed message; raise InvalidMessageBytesError on mismatch."""
        if (msg.plc != cls.plc
                or msg.payload_body != cls.payload_body
                or msg.info_type != cls.info_type):
            raise InvalidMessageBytesError
        result = cls(msg.sn, msg.sequence, msg.msg_type, msg.event_type)
        return cls.from_parent(result) if recurse else result
# Level 4: concrete response/update classes built on the simple-message bases
class BaseStationKeypadTestModeOnResponse(BaseStationKeypadSimpleStatusMessage, BaseStationKeypadResponseTrait):
    """Acknowledges a keypad TEST_MODE_ON request."""
    event_type = KeypadMessage.EventType.TEST_MODE_ON_REQUEST

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
        super().__init__(kp_sn, sequence, bs_sn, self.msg_type, self.event_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadSimpleStatusMessage, recurse: bool=True):
        """Narrow a simple status message; raise InvalidMessageBytesError on mismatch."""
        if msg.msg_type != cls.msg_type or msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence, msg.bs_sn)
class BaseStationKeypadTestModeOnUpdate(BaseStationKeypadSimpleStatusMessage, BaseStationKeypadUpdateTrait):
    """Broadcast update that test mode has been switched on."""
    event_type = KeypadMessage.EventType.TEST_MODE_ON_REQUEST

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
        super().__init__(kp_sn, sequence, bs_sn, self.msg_type, self.event_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadSimpleStatusMessage, recurse: bool=True):
        """Narrow a simple status message; raise InvalidMessageBytesError on mismatch."""
        if msg.msg_type != cls.msg_type or msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence, msg.bs_sn)
class BaseStationKeypadOffResponse(BaseStationKeypadSimpleStatusMessage, BaseStationKeypadResponseTrait):
    """Acknowledges a keypad OFF (disarm) request.

    Consistency fix: every sibling factory (TestModeOn/Off response/update)
    also validates msg_type; this one previously skipped that check and would
    accept a non-response message type carrying the OFF event.
    """
    event_type = KeypadMessage.EventType.OFF_REQUEST

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
        super().__init__(kp_sn, sequence, bs_sn, self.msg_type, self.event_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadSimpleStatusMessage, recurse: bool=True):
        """Narrow a simple status message; raise InvalidMessageBytesError on mismatch."""
        if msg.msg_type != cls.msg_type:
            raise InvalidMessageBytesError
        if msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        msg = cls(msg.sn, msg.sequence, msg.bs_sn)
        return msg
class BaseStationKeypadTestModeOffResponse(BaseStationKeypadSimpleStatusMessage, BaseStationKeypadResponseTrait):
    """Acknowledges a keypad TEST_MODE_OFF request."""
    event_type = KeypadMessage.EventType.TEST_MODE_OFF_REQUEST

    def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
        super().__init__(kp_sn, sequence, bs_sn, self.msg_type, self.event_type)

    @classmethod
    def factory(cls, msg: BaseStationKeypadSimpleStatusMessage, recurse: bool=True):
        """Narrow a simple status message; raise InvalidMessageBytesError on mismatch."""
        if msg.msg_type != cls.msg_type or msg.event_type != cls.event_type:
            raise InvalidMessageBytesError
        return cls(msg.sn, msg.sequence, msg.bs_sn)
class BaseStationKeypadTestModeOffUpdate(BaseStationKeypadSimpleStatusMessage, BaseStationKeypadUpdateTrait):
event_type = KeypadMessage.EventType.TEST_MODE_OFF_REQUEST
def __init__(self, kp_sn: str, sequence: int, bs_sn: str):
super().__init__(kp_sn, sequence, bs_sn, self.msg_type, self.event_type)
@classmethod
def factory(cls, msg: BaseStationKeypadSimpleStatusMessage, recurse: bool=True):
if msg.msg_type != cls.msg_type:
raise | |
# Source repository: ahameedx/intel-inb-manageability
"""
Allows API of xlink driver C library to be called in Python.
Copyright (C) 2019-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import threading
import os
from ctypes import *
from threading import Lock
from typing import Callable, Tuple
import time
from inbm_vision_lib.constants import SECURE_XLINK_LIB_PATH, XLINK_LIB_PATH, NODE_BUFFER_TIMEOUT, \
VISION_BUFFER_TIMEOUT, XLINK_SECURE_DATA_SIZE, MAXIMUM_STORE_FILE_SIZE, XLINK_FILE_TRANSFER_RATE, \
SECURE_XLINK_PROVISION_LIB_PATH, MINIMUM_GUID_LENGTH, MAXIMUM_GUID_BUFFER
from ..constants import VISION
from .ixlink_wrapper import IXlinkWrapper, xlink_handle, xlink_prof_cfg, HOST_DEVICE, PCIE, X_LINK_SUCCESS, \
XlinkWrapperException
logger = logging.getLogger(__name__)
class xlink_global_handle(Structure):
    """Struct of XLinkGlobalHandler_t"""
    # Mirrors the C layout: log level followed by the profiling configuration.
    _fields_ = [("loglevel", c_int),
                ("prof_cfg", xlink_prof_cfg)]
class XlinkSecureWrapper(IXlinkWrapper):
"""Wrapper class to use secured xlink shared library
@param receive_callback: Callback for receiving messages over xlink
@param channel_id: Channel used for xlink communication
@param pcie_num: PCIe Channel for xlink channel
@param is_boot_dev: True if xlink boot device API to be called; otherwise, False
"""
def __init__(self, receive_callback: Callable, channel_id: int, pcie_num: int, is_boot_dev: bool) -> None:
    """Configure the base wrapper and launch asynchronous channel setup."""
    super().__init__(XLINK_LIB_PATH,
                     receive_callback,
                     channel_id,
                     xlink_global_handle(prof_cfg=PCIE),
                     XLINK_SECURE_DATA_SIZE,
                     xlink_handle(dev_type=HOST_DEVICE),
                     pcie_num,
                     async_cb=None)
    # Secure API addresses devices by software device id == PCIe number.
    self._xlink_handler.sw_device_id = self._xlink_pcie_num
    self._is_boot_dev = is_boot_dev
    # Xlink secure only support part of APIs, need to use regular xlink library for other APIs
    self._secure_xlink = CDLL(SECURE_XLINK_LIB_PATH)
    # Locks serialize open/read/write calls into the shared library.
    self._open_channel_lock = Lock()
    self._read_data_lock = Lock()
    self._write_data_lock = Lock()
    # Channel setup runs on a daemon thread so construction does not block.
    self.init_thread = threading.Thread(target=self._init_channel)
    self.init_thread.daemon = True
    self.init_thread.start()
    # Listener thread is created here but only started via start().
    self._listen_thread = threading.Thread(target=self._listen_to_channel)
    self._listen_thread.daemon = True
def _init_channel(self):
    """Initialize Xlink handler, connect the handler and open channel.

    Runs on a daemon thread; retries each step once per second while
    self._running is True and sets self.xlink_init_status_success on success.

    Fix: C return codes are compared with '==' instead of 'is' — identity
    comparison of ints only works by accident of CPython small-int caching.
    Also removed a dead 'else: pass' branch.
    """
    logger.debug('Start Xlink Secure initialization.')
    self.xlink_init_status_success = False
    while self._running:
        logger.debug('waiting xlink_secure_initialize...')
        status = self._secure_xlink.xlink_secure_initialize()
        if status == X_LINK_SUCCESS:
            break
        time.sleep(1)
    logger.debug('xlink_secure_initialize complete.')
    logger.debug(f"PCIE Number: {self._xlink_pcie_num}")
    if self._is_boot_dev:
        self.boot_device()
    xlink_handler_p = byref(self._xlink_handler)
    logger.debug(
        'xlink_connect start connecting... Waiting the connection...')
    while self._running:
        status = self._secure_xlink.xlink_secure_connect(xlink_handler_p)
        if status == X_LINK_SUCCESS:
            logger.debug('xlink_connect pass.')
            logger.debug('xlink_open_channel. Channel ID - ' +
                         str(self._channel_id.value))
            break
        logger.debug(
            'xlink_connect start connecting... Waiting the connection...')
        time.sleep(1)
    while self._running:
        if self._open_channel_lock.acquire():
            # NOTE(review): self._agent is not assigned in this class —
            # presumably set by the base wrapper; confirm.
            timeout = VISION_BUFFER_TIMEOUT if self._agent == VISION else NODE_BUFFER_TIMEOUT
            try:
                status = self._secure_xlink.xlink_secure_open_channel(xlink_handler_p, self._channel_id,
                                                                     self._operation_type,
                                                                     self._data_size, timeout * 1000)
            finally:
                self._open_channel_lock.release()
            if status == X_LINK_SUCCESS:
                logger.debug('Opened secure Xlink Channel. Channel ID - ' +
                             str(self._channel_id.value))
                # Wait 5 seconds for xlink to stabilize
                time.sleep(5)
                self.xlink_init_status_success = True
                logger.info('Xlink Secure initialization complete.')
                break
        time.sleep(1)
def boot_device(self) -> None:
    """ Call xlink API to boot the device.

    vision-agent will boot the device. Currently there is no support to boot VPU FW from node.
    """
    # The secure library exposes no boot API; delegate to the regular wrapper.
    super().boot_device()
def reset_device(self) -> None:
    """Call xlink API to reset the device"""
    # The secure library exposes no reset API; delegate to the regular wrapper.
    super().reset_device()
def _register_callback(self) -> None:
    """Register data-available/data-consumed event callbacks with xlink.

    Failures are logged but not raised — registration is best-effort.

    Fix: status codes compared with '!=' instead of 'is not'; identity
    comparison of ints only works by accident of CPython small-int caching.
    """
    status = self._xlink_library.xlink_data_available_event(byref(self._xlink_handler), self._channel_id,
                                                            c_void_p())
    if status != X_LINK_SUCCESS:
        logger.error('Xlink Data Event Failed - %s', str(status))
    status = self._xlink_library.xlink_data_consumed_event(byref(self._xlink_handler), self._channel_id,
                                                           c_void_p())
    if status != X_LINK_SUCCESS:
        logger.error('Xlink Data Event Failed - %s ', str(status))
    logger.debug("xlink callback register pass.")
def _listen_to_channel(self):
    """Listen the channel and waiting for incoming message.

    Polls xlink_secure_read_data once per second; on data, decodes the
    buffer, releases it, and invokes the receive callback.

    Fix: the message was previously assembled by decoding one byte at a
    time with '+=' — quadratic in message size and broken for multibyte
    UTF-8 sequences. Slicing the ctypes pointer yields the whole bytes
    payload, which is decoded once.
    """
    # Waiting xlink initialization complete
    s_buffer = create_string_buffer(self._data_size)
    message = POINTER(c_char)(s_buffer)  # type: ignore
    while self._running and not self.xlink_init_status_success:
        time.sleep(1)
    while self._running:
        size = c_uint32(0)
        while self._running and size.value == 0 and self._read_data_lock.acquire():
            try:
                self._secure_xlink.xlink_secure_read_data(byref(self._xlink_handler), self._channel_id,
                                                          byref(message),
                                                          byref(size))
                time.sleep(1)
            finally:
                self._read_data_lock.release()
        if size.value != 0:
            logger.info('Received message size ' +
                        str(size.value) + '. Message is:')
            # Slicing a POINTER(c_char) returns bytes; decode in one pass.
            message_combined = bytes(message[:size.value]).decode('utf-8')  # type: ignore
            logger.info('%s', str(message_combined))
            self._xlink_release_data()
            if self._receive_callback is not None:
                logger.info(
                    'Receive callback method exist. Call the method.')
                self._receive_callback(message_combined)
def receive_file(self, file_save_path: str) -> str:
    """Receive update file and save it to the local repository.

    Protocol (mirrors send_file): the sender transmits the file name, then
    the chunk count, then the file contents in one or more chunks.

    @param file_save_path: local path to save the update file
    @return : (str) received file name
    """
    super()._check_directory(file_save_path)
    logger.debug("Switch to receive file mode.")
    s_buffer = create_string_buffer(self._data_size)
    message = POINTER(c_char)(s_buffer)  # type: ignore
    size = c_uint32(0)
    # Receive file name
    while size.value == 0 and self._running:
        self._secure_xlink.xlink_secure_read_data(byref(self._xlink_handler), self._channel_id, byref(message),
                                                  byref(size))
        time.sleep(1)
    file_name = ""
    for i in range(size.value):
        file_name = file_name + message[i].decode('utf-8')  # type: ignore
    logger.debug(f"Receive file name - {file_name}")
    file_path = os.path.join(file_save_path, file_name)
    self._xlink_release_data()
    # Receive number of chunk
    size = c_uint32(0)
    while size.value == 0 and self._running:
        self._secure_xlink.xlink_secure_read_data(byref(self._xlink_handler), self._channel_id, byref(message),
                                                  byref(size))
        time.sleep(1)
    chunk_message = ""
    for i in range(size.value):
        chunk_message = chunk_message + \
            message[i].decode('utf-8')  # type: ignore
    num_of_chunk = int(chunk_message)
    logger.debug(f"Number of chunk - {chunk_message}")
    self._xlink_release_data()
    # Receive update file
    logger.info("Receiving file. Please wait......")
    # Reset size for receiving file
    with open(file_path, 'wb') as update_file:
        if num_of_chunk > 1:
            # Multi-chunk: accumulate in memory and flush to disk whenever
            # the buffer exceeds MAXIMUM_STORE_FILE_SIZE or on the last chunk.
            file_collect = b''
            for num in range(num_of_chunk):
                logger.info("{}/{}".format(num, num_of_chunk - 1))
                size = c_uint32(0)
                while size.value == 0 and self._running:
                    self._secure_xlink.xlink_secure_read_data(byref(self._xlink_handler), self._channel_id,
                                                              byref(
                                                                  message),
                                                              byref(size))
                    time.sleep(0.5)
                file_collect = file_collect + \
                    message[:size.value]  # type: ignore
                # Write to file if file stored in memory larger than the limit or it is the last chunk of file.
                if len(file_collect) > MAXIMUM_STORE_FILE_SIZE or num == (num_of_chunk - 1):
                    logger.debug("write to file")
                    update_file.write(file_collect)  # type: ignore
                    update_file.flush()
                    file_collect = b''
                if num != (num_of_chunk - 1):
                    # NOTE(review): the last chunk's buffer is deliberately
                    # not released here — presumably released elsewhere; confirm.
                    self._xlink_release_data()
        else:
            # Single chunk: write the payload byte by byte.
            size = c_uint32(0)
            while size.value == 0 and self._running:
                self._secure_xlink.xlink_secure_read_data(byref(self._xlink_handler), self._channel_id,
                                                          byref(message),
                                                          byref(size))
            for i in range(size.value):
                # Temporary disable the progress bar as it causes slowness in simics.
                # progress = receive_file_progress(i, int(size.value))
                # if progress:
                #     logger.info("Receiving file size " + str(progress) + "%")
                update_file.write(message[i])  # type: ignore
            self._xlink_release_data()
    logger.info("Receive file complete. File size: %i",
                os.path.getsize(file_path))
    logger.debug("File stored at: %s", file_path)
    return file_name
def get_init_status(self) -> bool:
    """ Get the initialization status

    @return: boolean representing initialization status
    """
    # Set True by _init_channel once the secure channel is open.
    return self.xlink_init_status_success
def start(self) -> None:
    """start to listen the receive channel"""
    # Launches the daemon listener thread created in __init__.
    self._listen_thread.start()
def send(self, message: str) -> None:
    """Send the message through xlink write data API

    Blocks until channel initialization finishes (or the wrapper stops);
    the message is only written when initialization succeeded.

    @param message: message to be sent
    """
    # Waiting xlink initialization complete
    while self._running and not self.xlink_init_status_success:
        time.sleep(0.1)
    if not self.xlink_init_status_success:
        logger.info('Stop XLinkWriteData')
        return
    logger.debug('Sending message: ' + str(message))
    self._write_data_via_secured(message)
def receive(self, message: str) -> None:
    """Receive message"""
    # Intentionally a no-op: incoming data is handled by _listen_to_channel.
    pass
def get_xlink_device_status(self) -> int:
    """ Check the xlink device status.

    @return: status of xlink device
    """
    # NOTE(review): not implemented for the secure wrapper — returns None
    # despite the int annotation; callers should not rely on the result.
    pass
def _write_data_via_secured(self, message: str):
    """Write *message* over the secure channel, serialized by the write lock.

    Does nothing when the wrapper is no longer running.
    """
    if not self._running:
        return
    self._write_data_lock.acquire()
    try:
        encoded = message.encode('utf8')
        status = self._secure_xlink.xlink_secure_write_data(byref(self._xlink_handler), self._channel_id,
                                                            encoded, len(encoded))
        super()._check_status(status, 'XLinkSecureWriteData failed.')
    finally:
        self._write_data_lock.release()
def send_file(self, file_path: str) -> None:
    # inherit docstring from superclass
    # Protocol: announce file mode, then send the file name, then the chunk
    # count, then the file contents in one or more chunks (see receive_file).
    super()._check_directory(file_path)
    self._write_data_via_secured("FILE")
    time.sleep(1)
    file_name = file_path.rsplit('/')[-1]
    logger.debug("sending file via xlink: " + file_name)
    self._write_data_via_secured(file_name)
    time.sleep(1)
    chunk_message, number_of_chunk, transfer_size = self.get_chunk_message(
        file_path)
    self._write_data_via_secured(chunk_message)
    time.sleep(1)
    if number_of_chunk > 1:
        with open(file_path, 'rb') as update_file:
            for num in range(number_of_chunk):
                # Last chunk takes whatever remains in the file.
                if num == number_of_chunk - 1:
                    read_file = update_file.read()
                else:
                    read_file = update_file.read(transfer_size)
                status = self._secure_xlink.xlink_secure_write_data(
                    byref(self._xlink_handler), self._channel_id, read_file, len(read_file))
                super()._check_status(status, 'XLinkWriteData data failed.')
                # For larger file size, increase the waiting time due to xlink instability
                time.sleep(0.01)
    else:
        # Small file: single write of the whole contents.
        with open(file_path, 'rb') as update_file:
            read_file = update_file.read()
            status = self._secure_xlink.xlink_secure_write_data(
                byref(self._xlink_handler), self._channel_id, read_file, len(read_file))
            super()._check_status(status, 'XLinkWriteData data failed.')
def _xlink_release_data(self) -> None:
"""Release xlink data buffer"""
status = self._secure_xlink.xlink_secure_release_data(
byref(self._xlink_handler), self._channel_id, None)
super()._check_status(status, 'XLink release data failed.')
@staticmethod
def get_guid(sw_device_id: int) -> Tuple[str, str]:
"""Call secure xlink API to get specific node's GUID and SVN.
@param sw_device_id: sw_device_id to be checked
@return: GUID of node, SVN of node
"""
logger.debug(f"get_guid: sw_device_id: {sw_device_id}")
try:
guid = create_string_buffer(MAXIMUM_GUID_BUFFER)
guid_len = c_uint32(MINIMUM_GUID_LENGTH)
svn = c_uint32(0)
_secure_xlink_provision = CDLL(SECURE_XLINK_PROVISION_LIB_PATH)
# Get GUID
status = _secure_xlink_provision.secure_xlink_provision_read_guid(
sw_device_id, guid, guid_len)
IXlinkWrapper._check_status(status, f'Secure xlink read GUID failed with status {status}. '
f'SWID - {sw_device_id}')
logger.debug(f"GUID = {guid.value.decode('utf-8')}")
# Get SVN
status = _secure_xlink_provision.secure_xlink_provision_read_svn(
sw_device_id, byref(svn))
IXlinkWrapper._check_status(status, f'Secure xlink read GUID failed with status {status}. '
f'SWID - {sw_device_id}')
logger.debug(f"svn = {str(svn.value)}")
return guid.value.decode('utf-8'), str(svn.value)
except (XlinkWrapperException, OSError, TypeError, SystemError) as e:
logger.error(
f'Error retrieving GUID for node with device_id: {sw_device_id}. Error: {e}')
return "0", "0"
@staticmethod
def is_provisioned(sw_device_id: int) -> bool:
"""Call secure xlink API to get node's provisioned status.
@param sw_device_id: sw_device_id to be checked
@return: True if provisioned. False if not provisioned.
"""
try:
_secure_xlink_provision = CDLL(SECURE_XLINK_PROVISION_LIB_PATH)
provision_status = c_int(0)
status = _secure_xlink_provision.secure_xlink_is_provisioned(
sw_device_id, byref(provision_status))
IXlinkWrapper._check_status(status, f'Secure xlink get provisioned status failed with status {status}. '
f'SWID - {sw_device_id}')
status = True if provision_status.value else False
logger.debug(
f"is_provisioned status of {str(sw_device_id)}: {status}")
return status
except (XlinkWrapperException, OSError, SystemError) as e:
logger.error(str(e))
return False
def stop(self, disconnect: bool = False) -> None:
# inherit docstring from superclass
| |
main complaint at last visit"), "achievement"),
"termination_type_id",
S3SQLInlineComponent(
"document",
name = "file",
label = T("Attachments"),
fields = ["file", "comments"],
filterby = {"field": "file",
"options": "",
"invert": True,
},
),
"comments",
)
# Custom list fields
list_fields = ["person_id",
"need__link.need_id",
"service_id",
"human_resource_id",
"project_id",
"activity_id",
]
else:
# Other perspective (currently unused)
expose_project_id(s3db.dvr_case_activity)
crud_form = S3SQLCustomForm("person_id",
"project_id",
"service_id",
#"need_id",
"followup",
"followup_date",
"activity_funding.funding_required",
"activity_funding.reason",
"comments",
)
# Custom list fields
list_fields = ["person_id",
"project_id",
"service_id",
"followup",
"followup_date",
]
s3db.configure("dvr_case_activity",
crud_form = crud_form,
list_fields = list_fields,
owner_group = stl_case_activity_owner_group,
)
s3db.add_custom_callback("dvr_case_activity",
"onvalidation",
case_activity_validation,
)
settings.customise_dvr_case_activity_resource = customise_dvr_case_activity_resource
# -------------------------------------------------------------------------
    def customise_dvr_case_activity_controller(**attr):
        """
        Controller customisation for dvr_case_activity: installs a custom
        prep that links person_id to the case and switches list fields and
        filter widgets depending on the perspective (due_followups vs.
        standard list).
        """

        s3db = current.s3db
        s3 = current.response.s3
        # Enable scalability-optimized strategies
        settings.base.bigtable = True
        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            if callable(standard_prep):
                result = standard_prep(r)
            else:
                result = True
            # Show person_id as link to case
            table = r.resource.table
            table.person_id.represent = s3db.pr_PersonRepresent(show_link=True)
            if r.function == "due_followups":
                # Follow-up perspective: reduced field set, simple filters
                crud_strings = s3.crud_strings["dvr_case_activity"]
                crud_strings["title_list"] = T("Interventions to follow-up")
                # Custom list fields
                list_fields = [(T("Ref.No."), "person_id$pe_label"),
                               "person_id",
                               (T("Protection Response Sector"), "case_activity_need.need_id"),
                               "service_id",
                               "followup_date",
                               ]
                # Custom filter widgets
                from s3 import S3TextFilter, S3DateFilter
                filter_widgets = [S3TextFilter(["person_id$pe_label",
                                                "person_id$first_name",
                                                "person_id$last_name",
                                                "case_activity_need.need_id$name",
                                                "service_id$name",
                                                ],
                                               label = T("Search"),
                                               ),
                                  S3DateFilter("followup_date",
                                               cols = 2,
                                               hidden = True,
                                               ),
                                  ]
            else:
                # Standard perspective: full field set and filter battery
                # Custom list fields
                list_fields = ["person_id",
                               "service_id",
                               "human_resource_id",
                               "project_id",
                               "need__link.need_id",
                               "start_date",
                               (T("Interventions Required"),
                                "response_type__link.response_type_id"),
                               "priority",
                               "followup",
                               "followup_date",
                               "completed",
                               "end_date",
                               ]
                # Custom filter widgets
                #Project Code,
                #Person Responsible,
                #Protection Assessment,
                #Service Type,
                #Protection Response Sector,
                #Interventions required
                # Limit HR filter options to selectable HRs
                requires = table.human_resource_id.requires
                if hasattr(requires, "options"):
                    hr_filter_opts = dict(opt for opt in requires.options() if opt[0])
                else:
                    hr_filter_opts = None
                from s3 import S3DateFilter, \
                               S3HierarchyFilter, \
                               S3LocationFilter, \
                               S3OptionsFilter, \
                               S3TextFilter, \
                               s3_get_filter_opts
                filter_widgets = [S3TextFilter(["person_id$pe_label",
                                                "person_id$first_name",
                                                "person_id$last_name",
                                                ],
                                               label = T("Search"),
                                               ),
                                  S3HierarchyFilter("person_id$dvr_case.organisation_id",
                                                    leafonly = False,
                                                    cascade_select = True,
                                                    ),
                                  S3OptionsFilter("human_resource_id",
                                                  options = hr_filter_opts,
                                                  ),
                                  S3OptionsFilter("project_id",
                                                  options = s3_get_filter_opts("project_project"),
                                                  ),
                                  S3HierarchyFilter("service_id",
                                                    hidden = True,
                                                    ),
                                  S3HierarchyFilter("vulnerability_type_case_activity.vulnerability_type_id",
                                                    label = T("Protection Assessment"),
                                                    hidden = True,
                                                    ),
                                  S3HierarchyFilter("case_activity_need.need_id",
                                                    hidden = True,
                                                    ),
                                  S3HierarchyFilter("response_type_case_activity.response_type_id",
                                                    hidden = True,
                                                    ),
                                  S3OptionsFilter("person_id$dvr_case_details.referral_type_id",
                                                  options = s3_get_filter_opts("dvr_referral_type"),
                                                  label = T("Referred to Case Management by"),
                                                  hidden = True,
                                                  ),
                                  # Not scalable
                                  #S3LocationFilter("person_id$address.location_id",
                                  #                 hidden = True,
                                  #                 ),
                                  S3OptionsFilter("completed",
                                                  hidden = True,
                                                  ),
                                  S3DateFilter("person_id$date_of_birth",
                                               hidden = True,
                                               ),
                                  S3DateFilter("start_date",
                                               hidden = True,
                                               ),
                                  S3DateFilter("end_date",
                                               hidden = True,
                                               ),
                                  ]
            r.resource.configure(filter_widgets = filter_widgets,
                                 list_fields = list_fields,
                                 )
            return result
        s3.prep = custom_prep
        return attr
    settings.customise_dvr_case_activity_controller = customise_dvr_case_activity_controller
# -------------------------------------------------------------------------
def customise_dvr_activity_funding_resource(r, tablename):
T = current.T
table = current.s3db.dvr_activity_funding
field = table.funding_required
field.label = T("Need for SNF")
field = table.reason
field.label = T("Justification for SNF")
settings.customise_dvr_activity_funding_resource = customise_dvr_activity_funding_resource
# -------------------------------------------------------------------------
def customise_dvr_economy_resource(r, tablename):
table = current.s3db.dvr_economy
field = table.monthly_costs
field.label = current.T("Monthly Rent Expense")
settings.customise_dvr_economy_resource = customise_dvr_economy_resource
# -------------------------------------------------------------------------
    def customise_dvr_household_resource(r, tablename):
        """
        Custom CRUD form for households: head-of-household details plus
        inline beneficiary data (household member counts per age group).

        NOTE(review): T is taken from the enclosing scope here, unlike other
        customisations which bind T = current.T locally — confirm intended.
        """
        from s3 import S3SQLCustomForm, S3SQLInlineComponent
        crud_form = S3SQLCustomForm("hoh_relationship",
                                    "hoh_name",
                                    "hoh_date_of_birth",
                                    "hoh_gender",
                                    S3SQLInlineComponent("beneficiary_data",
                                                         fields = [(T("Age Group"), "beneficiary_type_id"),
                                                                   "female",
                                                                   "male",
                                                                   "other",
                                                                   "out_of_school",
                                                                   (T("Number Working"), "employed"),
                                                                   ],
                                                         label = T("Household Members"),
                                                         explicit_add = T("Add Household Members"),
                                                         ),
                                    "comments",
                                    )
        current.s3db.configure("dvr_household",
                               crud_form = crud_form,
                               )
    settings.customise_dvr_household_resource = customise_dvr_household_resource
# -------------------------------------------------------------------------
    def customise_dvr_response_type_resource(r, tablename):
        """
        Relabel dvr_response_type CRUD strings to use "Intervention Type"
        terminology.
        """
        current.response.s3.crud_strings["dvr_response_type"] = Storage(
            label_create = T("Create Intervention Type"),
            title_display = T("Intervention Type Details"),
            title_list = T("Intervention Types"),
            title_update = T("Edit Intervention Type"),
            label_list_button = T("List Intervention Types"),
            label_delete_button = T("Delete Intervention Type"),
            msg_record_created = T("Intervention Type created"),
            msg_record_modified = T("Intervention Type updated"),
            msg_record_deleted = T("Intervention Type deleted"),
            msg_list_empty = T("No Intervention Types currently registered"),
            )
    settings.customise_dvr_response_type_resource = customise_dvr_response_type_resource
# -------------------------------------------------------------------------
def customise_dvr_vulnerability_type_resource(r, tablename):
# Expose required-flag
table = current.s3db.dvr_vulnerability_type
field = table.required
field.readable = field.writable = True
settings.customise_dvr_vulnerability_type_resource = customise_dvr_vulnerability_type_resource
# =========================================================================
# Person Registry
#
# Allow third gender
settings.pr.hide_third_gender = False
# -------------------------------------------------------------------------
def customise_pr_contact_resource(r, tablename):
table = current.s3db.pr_contact
field = table.contact_description
field.readable = field.writable = False
field = table.value
field.label = T("Number or Address")
field = table.contact_method
all_opts = current.msg.CONTACT_OPTS
subset = ("SMS",
"EMAIL",
"HOME_PHONE",
"WORK_PHONE",
"FACEBOOK",
"TWITTER",
"SKYPE",
"OTHER",
)
contact_methods = [(k, all_opts[k]) for k in subset if k in all_opts]
field.requires = IS_IN_SET(contact_methods, zero=None)
field.default = "SMS"
settings.customise_pr_contact_resource = customise_pr_contact_resource
# -------------------------------------------------------------------------
def customise_pr_education_level_resource(r, tablename):
table = current.s3db.pr_education_level
# Hide organisation_id (not used here)
field = table.organisation_id
field.readable = field.writable = False
settings.customise_pr_education_level_resource = customise_pr_education_level_resource
# -------------------------------------------------------------------------
def person_tag_onvalidation(form):
"""
Custom onvalidation callback for person tags
=> INDIVIDUAL_ID must be unique
"""
formvars = form.vars
tag = formvars.tag
value = formvars.value
if tag != "INDIVIDUAL_ID" or value is None:
return
# Is this an update?
if "id" in formvars:
record_id = formvars.id
elif "_id" in formvars:
# Inline component
record_id = formvars._id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
# New record
record_id = None
# Find a duplicate
table = current.s3db.pr_person_tag
query = (table.tag == "INDIVIDUAL_ID") & \
(table.value == value) & \
(table.deleted == False)
if record_id:
query &= (table.id != record_id)
duplicate = current.db(query).select(table.id,
limitby = (0, 1),
).first()
# Reject if duplicate exists
if duplicate:
form.errors["value"] = current.T("ID already in database")
# -------------------------------------------------------------------------
    def customise_pr_person_resource(r, tablename):
        """
        Person resource customisation: realm inheritance for components,
        custom ID-tag components, contacts method, and mandatory fields
        (reference number, last name, DoB, gender, nationality).
        """
        s3db = current.s3db
        # Configure components to inherit realm_entity from the person record
        s3db.configure("pr_person",
                       realm_components = ("case_activity",
                                           "case_details",
                                           "economy",
                                           "evaluation",
                                           "household",
                                           "person_details",
                                           "address",
                                           "contact",
                                           "contact_emergency",
                                           "presence",
                                           ),
                       update_realm = True,
                       )
        # Custom components
        s3db.add_components("pr_person",
                            # Govt-assigned IDs: Family ID, Individual ID
                            pr_person_tag = ({"name": "family_id",
                                              "joinby": "person_id",
                                              "filterby": {
                                                  "tag": "FAMILY_ID",
                                                  },
                                              "multiple": False,
                                              },
                                             {"name": "individual_id",
                                              "joinby": "person_id",
                                              "filterby": {
                                                  "tag": "INDIVIDUAL_ID",
                                                  },
                                              "multiple": False,
                                              },
                                             ),
                            # Education level (simplified model)
                            pr_education_level = {"link": "pr_education",
                                                  "joinby": "person_id",
                                                  "key": "level_id",
                                                  },
                            )
        # Add contacts-method
        if r.controller == "dvr":
            # Use pr_Contacts as contacts-method
            s3db.set_method("pr", "person",
                            method = "contacts",
                            action = s3db.pr_Contacts,
                            )
        from s3 import IS_PERSON_GENDER
        table = s3db.pr_person
        # ID label is required; explain it with a tooltip
        field = table.pe_label
        field.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("Reference Number"),
                                                T("The STL Individual Reference Number for this Beneficiary"),
                                                ),
                            )
        # Unwrap IS_EMPTY_OR to make the field mandatory
        requires = field.requires
        if isinstance(requires, IS_EMPTY_OR):
            field.requires = requires.other
        # Last name is required
        field = table.last_name
        field.requires = IS_NOT_EMPTY()
        # Date of Birth is required
        field = table.date_of_birth
        requires = field.requires
        if isinstance(requires, IS_EMPTY_OR):
            field.requires = requires.other
        # Gender is required, remove "unknown" option, adjust label
        field = table.gender
        field.label = current.T("Gender")
        field.default = None
        options = dict(s3db.pr_gender_opts)
        del options[1] # Remove "unknown"
        field.requires = IS_PERSON_GENDER(options, sort=True)
        dtable = s3db.pr_person_details
        # Nationality is required, default is Syrian
        field = dtable.nationality
        field.default = "SY"
        requires = field.requires
        if isinstance(requires, IS_EMPTY_OR):
            field.requires = requires.other
        # Custom validator for person tags (INDIVIDUAL_ID uniqueness)
        s3db.add_custom_callback("pr_person_tag",
                                 "onvalidation",
                                 person_tag_onvalidation,
                                 )
    settings.customise_pr_person_resource = customise_pr_person_resource
# -------------------------------------------------------------------------
def is_turkish_phone_number(value):
"""
Custom validator for beneficiary mobile phone number:
- requires 3-digit area code and 7-digit local number
- rejects all country codes other than 0090 resp. +90
- fixed output format with leading +90 country code
- removes all whitespace
"""
msg = "Enter turkish phone number in international format like +907837549574"
if isinstance(value, basestring):
import re
match = re.match("^(0090|\+90|0){0,1}((\s*[0-9]){10})$", value.strip())
if match:
error = None
value = "+90%s" % "".join(match.groups()[1].split())
else:
error = msg
else:
error = msg
return value, error
# -------------------------------------------------------------------------
def set_default_pe_label():
"""
Attempt to auto-generate a beneficiary reference number
for the logged-in staff member, using the Staff ID (code)
plus a n-digit number as pattern
"""
db = current.db
auth = current.auth
s3db = current.s3db
ptable = s3db.pr_person
htable = s3db.hrm_human_resource
# Number of trailing digits
DIGITS = 4
# Get the staff ID of the logged-in user
code = None
if auth.s3_logged_in() and auth.user:
query = (ptable.pe_id == auth.user.pe_id) & \
(htable.person_id == ptable.id) & \
(htable.deleted == False)
row = db(query).select(htable.code,
orderby = ~htable.modified_on,
limitby = (0, 1),
).first()
if row:
code = row.code
if not code:
# No staff ID => can not auto-generate reference number
return
# Get the highest reference number with that staff code
query = (ptable.pe_label.like("%s%s" % (code, "_" * DIGITS))) & \
(ptable.pe_label >= "%s%s1" % (code, | |
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content. Often,
# this is a reference to an implementation guide that defines the special rules
# along with other profiles etc.
StructField(
"implicitRules",
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".implicitrules",
),
True,
),
# The base language in which the resource is written.
StructField(
"language",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".language",
),
True,
),
# A human-readable narrative that contains a summary of the resource and can be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource and that modifies the understanding of the element
# that contains it and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer is allowed to define an extension, there is a set of requirements
# that SHALL be met as part of the definition of the extension. Applications
# processing a resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Identifiers assigned to this research study by the sponsor or other systems.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A short, descriptive user-friendly label for the study.
StructField("title", StringType(), True),
# The set of steps expected to be performed as part of the execution of the
# study.
StructField(
"protocol",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A larger research study of which this particular study is a component or step.
StructField(
"partOf",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# The current state of the study.
StructField("status", StringType(), True),
# The type of study based upon the intent of the study's activities. A
# classification of the intent of the study.
StructField(
"primaryPurposeType",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The stage in the progression of a therapy from initial experimental use in
# humans in clinical trials to post-market evaluation.
StructField(
"phase",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Codes categorizing the type of study such as investigational vs.
# observational, type of blinding, type of randomization, safety vs. efficacy,
# etc.
StructField(
"category",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# The medication(s), food(s), therapy(ies), device(s) or other concerns or
# interventions that the study is seeking to gain more information about.
StructField(
"focus",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# The condition that is the focus of the study. For example, In a study to
# examine risk factors for Lupus, might have as an inclusion criterion "healthy
# volunteer", but the target condition code would be a Lupus SNOMED code.
StructField(
"condition",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Contact details to assist a user in learning more about or engaging with the
# study.
StructField(
"contact",
ArrayType(
ContactDetailSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Citations, references and other related documents.
StructField(
"relatedArtifact",
ArrayType(
RelatedArtifactSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Key terms to aid in searching for or filtering the study.
StructField(
"keyword",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Indicates a country, state or other region where the study is taking place.
StructField(
"location",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A full description of how the study is being conducted.
StructField(
"description",
markdownSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".description",
),
True,
),
# Reference to a Group that defines the criteria for and quantity of subjects
# participating in the study. E.g. " 200 female Europeans between the ages of
# 20 and 45 with early onset diabetes".
StructField(
"enrollment",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Identifies the start date and the expected (or actual, depending on status)
# end date for the study.
StructField(
"period",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# An organization that initiates the investigation and is legally responsible
# for the study.
StructField(
"sponsor",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A researcher in a study who oversees multiple aspects of the study, such as
# concept development, protocol writing, protocol submission for IRB approval,
# participant recruitment, informed consent, data collection, analysis,
# interpretation and presentation.
StructField(
"principalInvestigator",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A facility in which study activities are conducted.
StructField(
"site",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
| |
packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Data_1Hz['Alt_1Hz'] = np.zeros((n_records),dtype=np.int32)
#-- Roll: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Roll'] = np.zeros((n_records),dtype=np.int32)
#-- Pitch: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Pitch'] = np.zeros((n_records),dtype=np.int32)
#-- Yaw: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Yaw'] = np.zeros((n_records),dtype=np.int32)
Data_1Hz['Spare'] = np.zeros((n_records),dtype=np.int16)
#-- Number of valid records in the block of twenty that contain data
#-- Last few records of the last block of a dataset may be blank blocks
#-- inserted to bring the file up to a multiple of twenty.
Data_1Hz['N_valid'] = np.zeros((n_records),dtype=np.int16)
#-- CryoSat-2 geophysical corrections (External Corrections Group)
Corrections = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Corrections['dryTrop'] = np.zeros((n_records),dtype=np.int16)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Corrections['wetTrop'] = np.zeros((n_records),dtype=np.int16)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Corrections['InvBar'] = np.zeros((n_records),dtype=np.int16)
#-- Dynamic Atmosphere Correction packed units (mm, 1e-3 m)
Corrections['DAC'] = np.zeros((n_records),dtype=np.int16)
#-- Ionospheric Correction packed units (mm, 1e-3 m)
Corrections['Iono'] = np.zeros((n_records),dtype=np.int16)
#-- Sea State Bias Correction packed units (mm, 1e-3 m)
Corrections['SSB'] = np.zeros((n_records),dtype=np.int16)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Corrections['ocTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Corrections['lpeTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Corrections['olTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Corrections['seTideElv'] = np.zeros((n_records),dtype=np.int16)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Corrections['gpTideElv'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare1'] = np.zeros((n_records),dtype=np.int16)
#-- Surface Type: Packed in groups of three bits for each of the 20 records
Corrections['Surf_type'] = np.zeros((n_records),dtype=np.uint64)
#-- Mean Sea Surface or Geoid packed units (mm, 1e-3 m)
Corrections['MSS_Geoid'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean Depth/Land Elevation Model (ODLE) packed units (mm, 1e-3 m)
Corrections['ODLE'] = np.zeros((n_records),dtype=np.int32)
#-- Ice Concentration packed units (%/100)
Corrections['Ice_conc'] = np.zeros((n_records),dtype=np.int16)
#-- Snow Depth packed units (mm, 1e-3 m)
Corrections['Snow_depth'] = np.zeros((n_records),dtype=np.int16)
#-- Snow Density packed units (kg/m^3)
Corrections['Snow_density'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare2'] = np.zeros((n_records),dtype=np.int16)
#-- Corrections Status Flag
Corrections['C_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Significant Wave Height (SWH) packed units (mm, 1e-3)
Corrections['SWH'] = np.zeros((n_records),dtype=np.int16)
#-- Wind Speed packed units (mm/s, 1e-3 m/s)
Corrections['Wind_speed'] = np.zeros((n_records),dtype=np.uint16)
Corrections['Spare3'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare4'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare5'] = np.zeros((n_records),dtype=np.int16)
Corrections['Spare6'] = np.zeros((n_records),dtype=np.int16)
#-- CryoSat-2 20 Hz data fields (Measurement Group)
#-- Derived from instrument measurement parameters
n_blocks = 20
Data_20Hz = {}
#-- Delta between the timestamps for 20Hz record and the 1Hz record
#-- D_time_mics packed units (microseconds)
Data_20Hz['D_time_mics'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['D_time_mics'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Data_20Hz['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Lat'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Data_20Hz['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Lon'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Measured elevation above ellipsoid from retracker 1: packed units (mm, 1e-3 m)
Data_20Hz['Elev_1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Elev_1'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Measured elevation above ellipsoid from retracker 2: packed units (mm, 1e-3 m)
Data_20Hz['Elev_2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Elev_2'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Measured elevation above ellipsoid from retracker 3: packed units (mm, 1e-3 m)
Data_20Hz['Elev_3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Elev_3'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Sigma Zero Backscatter for retracker 1: packed units (1e-2 dB)
Data_20Hz['Sig0_1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Sig0_1'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Sigma Zero Backscatter for retracker 2: packed units (1e-2 dB)
Data_20Hz['Sig0_2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Sig0_2'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Sigma Zero Backscatter for retracker 3: packed units (1e-2 dB)
Data_20Hz['Sig0_3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Sig0_3'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Freeboard: packed units (mm, 1e-3 m)
#-- -9999 default value indicates computation has not been performed
Data_20Hz['Freeboard'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Freeboard'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Interpolated Sea Surface Height Anomaly: packed units (mm, 1e-3 m)
Data_20Hz['SSHA_interp'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['SSHA_interp'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Interpolated Sea Surface Height measurement count
Data_20Hz['SSHA_interp_count'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['SSHA_interp_count'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Interpolation quality estimate RSS: packed units (mm, 1e-3 m)
Data_20Hz['SSHA_interp_RMS'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['SSHA_interp_RMS'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Peakiness: packed units (1e-2)
Data_20Hz['Peakiness'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
Data_20Hz['Peakiness'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Number of averaged echoes or beams
Data_20Hz['N_avg'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['N_avg'].mask = np.ones((n_records,n_blocks),dtype=bool)
Data_20Hz['Spare1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
Data_20Hz['Spare1'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Quality flags
Data_20Hz['Quality_flag'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
Data_20Hz['Quality_flag'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Corrections Application Flag
Data_20Hz['Corrections_flag'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
Data_20Hz['Corrections_flag'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Quality metric for retracker 1
Data_20Hz['Quality_1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Quality_1'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Quality metric for retracker 2
Data_20Hz['Quality_2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Quality_2'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- Quality metric for retracker 3
Data_20Hz['Quality_3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Quality_3'].mask = np.ones((n_records,n_blocks),dtype=bool)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Location Group for record r
Data_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Second'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Siral_mode'][r] = np.fromfile(fid,dtype='>u8',count=1)
Data_1Hz['Lat_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Lon_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Alt_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Roll'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Pitch'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Yaw'][r] = np.fromfile(fid,dtype='>i4',count=1)
Data_1Hz['Spare'][r] = np.fromfile(fid,dtype='>i2',count=1)
Data_1Hz['N_valid'][r] = np.fromfile(fid,dtype='>i2',count=1)
#-- CryoSat-2 External Corrections Group for record r
Corrections['dryTrop'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['wetTrop'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['InvBar'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['DAC'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Iono'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['SSB'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['ocTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['lpeTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['olTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['seTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['gpTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare1'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Surf_type'][r] = np.fromfile(fid,dtype='>u8',count=1)
Corrections['MSS_Geoid'][r] = np.fromfile(fid,dtype='>i4',count=1)
Corrections['ODLE'][r] = np.fromfile(fid,dtype='>i4',count=1)
Corrections['Ice_conc'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Snow_depth'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Snow_density'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare2'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['C_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Corrections['SWH'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Wind_speed'][r] = np.fromfile(fid,dtype='>u2',count=1)
Corrections['Spare3'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare4'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare5'][r] = np.fromfile(fid,dtype='>i2',count=1)
Corrections['Spare6'][r] = np.fromfile(fid,dtype='>i2',count=1)
#-- CryoSat-2 Measurements Group for record r and block b
for b in range(n_blocks):
Data_20Hz['D_time_mics'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Lat'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Lon'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Elev_1'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Elev_2'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Elev_3'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Sig0_1'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['Sig0_2'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['Sig0_3'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['Freeboard'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['SSHA_interp'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['SSHA_interp_count'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['SSHA_interp_RMS'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['Peakiness'].data[r,b] = np.fromfile(fid,dtype='>u2',count=1)
Data_20Hz['N_avg'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['Spare1'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1)
Data_20Hz['Quality_flag'].data[r,b] = np.fromfile(fid,dtype='>u4',count=1)
Data_20Hz['Corrections_flag'].data[r,b] = np.fromfile(fid,dtype='>u4',count=1)
Data_20Hz['Quality_1'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Quality_2'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Quality_3'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
#-- Set CryoSat-2 Measurements Group Masks for record r
Data_20Hz['D_time_mics'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Lat'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Lon'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Elev_1'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Elev_2'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Elev_3'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Sig0_1'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Sig0_2'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Sig0_3'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Freeboard'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['SSHA_interp'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['SSHA_interp_count'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['SSHA_interp_RMS'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Peakiness'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['N_avg'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Spare1'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Quality_flag'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Corrections_flag'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Quality_1'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Quality_2'].mask[r,:Data_1Hz['N_valid'][r]] = False
Data_20Hz['Quality_3'].mask[r,:Data_1Hz['N_valid'][r]] = False
#-- Bind all the bits of the l2_mds together into a single dictionary
CS_l2_mds = {}
CS_l2_mds['Data_1Hz'] = Data_1Hz
CS_l2_mds['Corrections'] = Corrections
CS_l2_mds['Data_20Hz'] = Data_20Hz
#-- return the output dictionary
return CS_l2_mds
#-- PURPOSE: Initiate L2 MDS variables for CryoSat Baseline D (netCDF4)
def cryosat_baseline_D(full_filename, UNPACK=False):
#-- open netCDF4 file for reading
fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
#-- use original unscaled units unless UNPACK=True
fid.set_auto_scale(UNPACK)
#-- get dimensions
time_cor_01 = fid.variables['time_cor_01'][:].copy()
time_20_ku = fid.variables['time_20_ku'][:].copy()
n_records, = time_cor_01.shape
n_blocks = 20
#-- CryoSat-2 1 Hz data fields (Location Group)
#-- Time and Orbit Parameters plus Measurement Mode
Data_1Hz = {}
#-- Time (seconds since 2000-01-01)
Data_1Hz['Time'] = time_cor_01.copy()
#-- Time: day part
Data_1Hz['Day'] = np.array(time_cor_01/86400.0,dtype=np.int32)
#-- Time: second part
Data_1Hz['Second'] = np.array(time_cor_01-Data_1Hz['Day'][:]*86400.0,dtype=np.int32)
#-- Time: microsecond part
Data_1Hz['Micsec'] = np.array((time_cor_01-Data_1Hz['Day'][:]*86400.0-
Data_1Hz['Second'][:])*1e6,dtype=np.int32)
#-- Lat_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Lat_1Hz'] = fid.variables['lat_01'][:].copy()
#-- Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Lon_1Hz'] = fid.variables['lon_01'][:].copy()
#-- Alt_1Hz: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Data_1Hz['Alt_1Hz'] = fid.variables['alt_01'][:].copy()
#-- Roll: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Roll'] = fid.variables['off_nadir_roll_angle_str_01'][:].copy()
#-- Pitch: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Pitch'] = fid.variables['off_nadir_pitch_angle_str_01'][:].copy()
#-- Yaw: packed units (0.1 micro-degree, 1e-7 degrees)
Data_1Hz['Yaw'] = fid.variables['off_nadir_yaw_angle_str_01'][:].copy()
#-- Number of valid records in the block of twenty that contain data
#-- Last few records of the last block of a dataset may be blank blocks
#-- inserted to bring the file up to a multiple of twenty.
Data_1Hz['N_valid'] = fid.variables['num_valid_01'][:].copy()
#-- add absolute orbit number to 1Hz data
Data_1Hz['Abs_Orbit'] = np.zeros((n_records),dtype=np.uint32)
Data_1Hz['Abs_Orbit'][:] = np.uint32(fid.abs_orbit_number)
#-- add ascending/descending flag to 1Hz data (A=ascending,D=descending)
Data_1Hz['Ascending_flag'] = np.zeros((n_records),dtype=bool)
Data_1Hz['Ascending_flag'][:] = (fid.ascending_flag == 'A')
#-- CryoSat-2 geophysical corrections (External Corrections Group)
Corrections = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 | |
#! /usr/bin/env python3
# Read from stdin, spit out C header or body.
import argparse
import copy
import fileinput
import re
from collections import namedtuple
# Parsed (name, value) pair for one wire-message enum constant from the csv.
Enumtype = namedtuple('Enumtype', ['name', 'value'])

# Wire size in bytes of every base type we know how to (de)serialize.
# Used to infer array element counts when a spec gives only a byte length.
type2size = {
    'pad': 1,
    'struct channel_id': 32,
    'struct short_channel_id': 8,
    'struct ipv6': 16,
    'secp256k1_ecdsa_signature': 64,
    'struct preimage': 32,
    'struct pubkey': 33,
    'struct sha256': 32,
    'struct bitcoin_blkid': 32,
    'struct bitcoin_txid': 32,
    'struct secret': 32,
    'u64': 8,
    'u32': 4,
    'u16': 2,
    'u8': 1,
    'bool': 1
}

# These struct array helpers require a context to allocate from.
varlen_structs = [
    'peer_features',
    'gossip_getnodes_entry',
    'failed_htlc',
    'utxo',
    'bitcoin_tx',
    'wirestring',
]
class FieldType(object):
    """The wire type of a single message field (e.g. 'u16', 'struct pubkey')."""

    # Types that can be read/written by plain assignment (any enum also
    # qualifies; see is_assignable).
    _ASSIGNABLE = ('u8', 'u16', 'u32', 'u64', 'bool')

    def __init__(self, name):
        self.name = name

    def is_assignable(self):
        """True if values of this type fit in a simple C variable."""
        if self.name.startswith('enum '):
            return True
        return self.name in self._ASSIGNABLE

    # We only accelerate the u8 case: it's common and trivial.
    def has_array_helper(self):
        """True if a dedicated fromwire/towire array helper exists."""
        return self.name == 'u8'

    # Returns base size
    @staticmethod
    def _typesize(typename):
        """Return the wire size in bytes of *typename*.

        Unknown structs/enums yield 0 (allowed for extensibility, can only
        happen if explicitly specified in csv); anything else is an error.
        """
        try:
            return type2size[typename]
        except KeyError:
            if typename.startswith('struct ') or typename.startswith('enum '):
                return 0
            raise ValueError('Unknown typename {}'.format(typename))
# Full (message, fieldname)-mappings: exact overrides consulted first by
# Field._guess_type().
typemap = {
    ('update_fail_htlc', 'reason'): FieldType('u8'),
    ('node_announcement', 'alias'): FieldType('u8'),
    ('update_add_htlc', 'onion_routing_packet'): FieldType('u8'),
    ('update_fulfill_htlc', 'payment_preimage'): FieldType('struct preimage'),
    ('error', 'data'): FieldType('u8'),
    ('shutdown', 'scriptpubkey'): FieldType('u8'),
    ('node_announcement', 'rgb_color'): FieldType('u8'),
    ('node_announcement', 'addresses'): FieldType('u8'),
    ('node_announcement', 'ipv6'): FieldType('struct ipv6'),
    ('announcement_signatures', 'short_channel_id'): FieldType('struct short_channel_id'),
    ('channel_announcement', 'short_channel_id'): FieldType('struct short_channel_id'),
    ('channel_update', 'short_channel_id'): FieldType('struct short_channel_id'),
    ('revoke_and_ack', 'per_commitment_secret'): FieldType('struct secret')
}

# Partial names that map to a datatype: substring match on the field name,
# consulted after typemap.
partialtypemap = {
    'signature': FieldType('secp256k1_ecdsa_signature'),
    'features': FieldType('u8'),
    'channel_id': FieldType('struct channel_id'),
    'chain_hash': FieldType('struct bitcoin_blkid'),
    'funding_txid': FieldType('struct bitcoin_txid'),
    'pad': FieldType('pad'),
}

# Size to typename match: last-resort guess from the byte length alone.
sizetypemap = {
    33: FieldType('struct pubkey'),
    32: FieldType('struct sha256'),
    8: FieldType('u64'),
    4: FieldType('u32'),
    2: FieldType('u16'),
    1: FieldType('u8')
}

# It would be nicer if we had put '*u8' in spec and disallowed bare lenvar.
# In practice we only recognize lenvar when it's the previous field.
# Grammar for the csv 'size' column:
# size := baresize | arraysize
# baresize := simplesize | lenvar
# simplesize := number | type
# arraysize := length '*' type
# length := lenvar | number
class Field(object):
    """One field of a wire message, parsed from a csv size specification.

    Per the grammar above: a leading '?' marks an optional field, 'N*type'
    an array, and a bare size equal to the previous field's name a raw
    length variable.
    """
    def __init__(self, message, name, size, comments, prevname):
        self.message = message
        self.comments = comments
        self.name = name
        self.is_len_var = False
        self.lenvar = None
        self.num_elems = 1
        self.optional = False

        # ? means optional field (not supported for arrays)
        if size.startswith('?'):
            self.optional = True
            size = size[1:]
        # If it's an arraysize, swallow prefix.
        elif '*' in size:
            number = size.split('*')[0]
            # A prefix naming the previous field makes this variable-size.
            if number == prevname:
                self.lenvar = number
            else:
                self.num_elems = int(number)
            size = size.split('*')[1]
        # NOTE(review): 'options' is a module-level argparse namespace
        # defined elsewhere in this file.
        elif options.bolt and size == prevname:
            # Raw length field, implies u8.
            self.lenvar = size
            size = '1'

        # Bolts use just a number: Guess type based on size.
        if options.bolt:
            base_size = int(size)
            self.fieldtype = Field._guess_type(message, self.name, base_size)
            # There are some arrays which we have to guess, based on sizes.
            tsize = FieldType._typesize(self.fieldtype.name)
            if base_size % tsize != 0:
                raise ValueError('Invalid size {} for {}.{} not a multiple of {}'
                                 .format(base_size,
                                         self.message,
                                         self.name,
                                         tsize))
            self.num_elems = int(base_size / tsize)
        else:
            # Real typename.
            self.fieldtype = FieldType(size)

    def basetype(self):
        """Return the type name with any 'struct '/'enum ' prefix stripped."""
        base = self.fieldtype.name
        if base.startswith('struct '):
            base = base[7:]
        elif base.startswith('enum '):
            base = base[5:]
        return base

    def is_padding(self):
        """True for pad fields (not exposed in generated C signatures)."""
        return self.name.startswith('pad')

    # Padding is always treated as an array.
    def is_array(self):
        return self.num_elems > 1 or self.is_padding()

    def is_variable_size(self):
        """True if the element count comes from a length variable."""
        return self.lenvar is not None

    def needs_ptr_to_ptr(self):
        """True if the generated C accessor must take a pointer-to-pointer."""
        return self.is_variable_size() or self.optional

    def is_assignable(self):
        """True if the field can be marshalled by plain assignment."""
        if self.is_array() or self.needs_ptr_to_ptr():
            return False
        return self.fieldtype.is_assignable()

    def has_array_helper(self):
        return self.fieldtype.has_array_helper()

    # Returns FieldType
    @staticmethod
    def _guess_type(message, fieldname, base_size):
        """Infer a FieldType, consulting the lookup tables most-specific first."""
        # Check for full (message, fieldname)-matches
        if (message, fieldname) in typemap:
            return typemap[(message, fieldname)]

        # Check for partial field names
        for k, v in partialtypemap.items():
            if k in fieldname:
                return v

        # Check for size matches
        if base_size in sizetypemap:
            return sizetypemap[base_size]

        raise ValueError('Unknown size {} for {}'.format(base_size, fieldname))
fromwire_impl_templ = """bool fromwire_{name}({ctx}const void *p{args})
{{
{fields}
\tconst u8 *cursor = p;
\tsize_t plen = tal_count(p);
\tif (fromwire_u16(&cursor, &plen) != {enum.name})
\t\treturn false;
{subcalls}
\treturn cursor != NULL;
}}
"""
fromwire_header_templ = """bool fromwire_{name}({ctx}const void *p{args});
"""
towire_header_templ = """u8 *towire_{name}(const tal_t *ctx{args});
"""
towire_impl_templ = """u8 *towire_{name}(const tal_t *ctx{args})
{{
{field_decls}
\tu8 *p = tal_arr(ctx, u8, 0);
\ttowire_u16(&p, {enumname});
{subcalls}
\treturn memcheck(p, tal_count(p));
}}
"""
printwire_header_templ = """void printwire_{name}(const char *fieldname, const u8 *cursor);
"""
printwire_impl_templ = """void printwire_{name}(const char *fieldname, const u8 *cursor)
{{
\tsize_t plen = tal_count(cursor);
\tif (fromwire_u16(&cursor, &plen) != {enum.name}) {{
\t\tprintf("WRONG TYPE?!\\n");
\t\treturn;
\t}}
{subcalls}
\tif (plen != 0)
\t\tprintf("EXTRA: %s\\n", tal_hexstr(NULL, cursor, plen));
}}
"""
class CCode(object):
    """Accumulates generated C source lines, managing indentation itself."""
    def __init__(self):
        self.indent = 1
        self.single_indent = False
        self.code = []

    def append(self, lines):
        """Append newline-separated C lines, tracking brace/if/for nesting."""
        for line in lines.split('\n'):
            # Callers must not pre-indent: we own all tab insertion.
            assert '\t' not in line
            # Special case: } by itself is pre-unindented.
            if line == '}':
                self.indent -= 1
                self.code.append('\t' * self.indent + line)
                continue
            self.code.append('\t' * self.indent + line)
            if self.single_indent:
                # The previous line was a braceless for/if; its one-line
                # body has now been emitted.
                self.single_indent = False
                self.indent -= 1
            if line.endswith('{'):
                self.indent += 1
            elif line.endswith('}'):
                self.indent -= 1
            elif line.startswith(('for', 'if')):
                # Braceless for/if: indent just the following line.
                self.indent += 1
                self.single_indent = True

    def __str__(self):
        # Every opened block must have been closed before rendering.
        assert self.indent == 1
        assert not self.single_indent
        return '\n'.join(self.code)
class Message(object):
def __init__(self, name, enum, comments):
self.name = name
self.enum = enum
self.comments = comments
self.fields = []
self.has_variable_fields = False
def checkLenField(self, field):
# Optional fields don't have a len.
if field.optional:
return
for f in self.fields:
if f.name == field.lenvar:
if f.fieldtype.name != 'u16':
raise ValueError('Field {} has non-u16 length variable {} (type {})'
.format(field.name, field.lenvar, f.fieldtype.name))
if f.is_array() or f.needs_ptr_to_ptr():
raise ValueError('Field {} has non-simple length variable {}'
.format(field.name, field.lenvar))
f.is_len_var = True
f.lenvar_for = field
return
raise ValueError('Field {} unknown length variable {}'
.format(field.name, field.lenvar))
def addField(self, field):
# We assume field lengths are 16 bit, to avoid overflow issues and
# massive allocations.
if field.is_variable_size():
self.checkLenField(field)
self.has_variable_fields = True
elif field.basetype() in varlen_structs or field.optional:
self.has_variable_fields = True
self.fields.append(field)
    def print_fromwire_array(self, subcalls, basetype, f, name, num_elems):
        """Emit C code into *subcalls* that deserializes array field *f*.

        *name* is the C lvalue to fill and *num_elems* the element count
        (a literal number or a length-variable name).
        """
        if f.has_array_helper():
            # u8 arrays have a dedicated C helper.
            subcalls.append('fromwire_{}_array(&cursor, &plen, {}, {});'
                            .format(basetype, name, num_elems))
        else:
            # Element-by-element loop; the call form depends on the type.
            subcalls.append('for (size_t i = 0; i < {}; i++)'
                            .format(num_elems))
            if f.fieldtype.is_assignable():
                subcalls.append('({})[i] = fromwire_{}(&cursor, &plen);'
                                .format(name, basetype))
            elif basetype in varlen_structs:
                # Variable-length structs are tal-allocated off ctx.
                subcalls.append('({})[i] = fromwire_{}(ctx, &cursor, &plen);'
                                .format(name, basetype))
            else:
                subcalls.append('fromwire_{}(&cursor, &plen, {} + i);'
                                .format(basetype, name))
    def print_fromwire(self, is_header):
        """Return C source for fromwire_<name>(): the prototype when
        *is_header*, otherwise the full implementation body."""
        # Variable-size messages need a tal context to allocate from.
        ctx_arg = 'const tal_t *ctx, ' if self.has_variable_fields else ''
        args = []
        for f in self.fields:
            if f.is_len_var or f.is_padding():
                # Implicit on the wire; not part of the C signature.
                continue
            elif f.is_array():
                args.append(', {} {}[{}]'.format(f.fieldtype.name, f.name, f.num_elems))
            else:
                ptrs = '*'
                # If we're handing a variable array, we need a ptr-to-ptr.
                if f.needs_ptr_to_ptr():
                    ptrs += '*'
                # If each type is a variable length, we need a ptr to that.
                if f.basetype() in varlen_structs:
                    ptrs += '*'
                args.append(', {} {}{}'.format(f.fieldtype.name, ptrs, f.name))
        template = fromwire_header_templ if is_header else fromwire_impl_templ
        # Local declarations for length variables read off the wire.
        fields = ['\t{} {};\n'.format(f.fieldtype.name, f.name) for f in self.fields if f.is_len_var]
        subcalls = CCode()
        for f in self.fields:
            basetype = f.basetype()
            # Propagate spec comments into the generated C.
            for c in f.comments:
                subcalls.append('/*{} */'.format(c))
            if f.is_padding():
                subcalls.append('fromwire_pad(&cursor, &plen, {});'
                                .format(f.num_elems))
            elif f.is_array():
                self.print_fromwire_array(subcalls, basetype, f, f.name,
                                          f.num_elems)
            elif f.is_variable_size():
                subcalls.append("//2nd case {name}".format(name=f.name))
                typename = f.fieldtype.name
                # If structs are varlen, need array of ptrs to them.
                if basetype in varlen_structs:
                    typename += ' *'
                # Allocate the output array (NULL when the length is zero).
                subcalls.append('*{} = {} ? tal_arr(ctx, {}, {}) : NULL;'
                                .format(f.name, f.lenvar, typename, f.lenvar))
                self.print_fromwire_array(subcalls, basetype, f, '*' + f.name,
                                          f.lenvar)
            else:
                if f.optional:
                    # Optional fields are prefixed by a presence bool.
                    subcalls.append("if (!fromwire_bool(&cursor, &plen))\n"
                                    "*{} = NULL;\n"
                                    "else {{\n"
                                    "*{} = tal(ctx, {});\n"
                                    "fromwire_{}(&cursor, &plen, *{});\n"
                                    "}}"
                                    .format(f.name, f.name, f.fieldtype.name,
                                            basetype, f.name))
                elif f.is_assignable():
                    subcalls.append("//3th case {name}".format(name=f.name))
                    if f.is_len_var:
                        # Length variables land in a local, not an out-param.
                        subcalls.append('{} = fromwire_{}(&cursor, &plen);'
                                        .format(f.name, basetype))
                    else:
                        subcalls.append('*{} = fromwire_{}(&cursor, &plen);'
                                        .format(f.name, basetype))
                elif basetype in varlen_structs:
                    subcalls.append('*{} = fromwire_{}(ctx, &cursor, &plen);'
                                    .format(f.name, basetype))
                else:
                    subcalls.append('fromwire_{}(&cursor, &plen, {});'
                                    .format(basetype, f.name))
        return template.format(
            name=self.name,
            ctx=ctx_arg,
            args=''.join(args),
            fields=''.join(fields),
            enum=self.enum,
            subcalls=str(subcalls)
        )
    def print_towire_array(self, subcalls, basetype, f, num_elems):
        """Emit C code into *subcalls* that serializes array field *f*.

        *num_elems* is a literal count or a length-variable name.
        """
        if f.has_array_helper():
            # u8 arrays have a dedicated C helper.
            subcalls.append('towire_{}_array(&p, {}, {});'
                            .format(basetype, f.name, num_elems))
        else:
            subcalls.append('for (size_t i = 0; i < {}; i++)'
                            .format(num_elems))
            # Assignable types and varlen structs are passed by value/pointer
            # element; everything else by address of the element.
            if f.fieldtype.is_assignable() or basetype in varlen_structs:
                subcalls.append('towire_{}(&p, {}[i]);'
                                .format(basetype, f.name))
            else:
                subcalls.append('towire_{}(&p, {} + i);'
                                .format(basetype, f.name))
def print_towire(self, is_header):
template = towire_header_templ if is_header else towire_impl_templ
args = []
for f in self.fields:
if f.is_padding() or f.is_len_var:
continue
if f.is_array():
args.append(', const {} {}[{}]'.format(f.fieldtype.name, f.name, f.num_elems))
elif f.is_assignable():
args.append(', {} {}'.format(f.fieldtype.name, f.name))
elif f.is_variable_size() and f.basetype() in varlen_structs:
args.append(', const {} **{}'.format(f.fieldtype.name, f.name))
| |
import os
# Select the offscreen-rendering backend for pyopengl/pyrender BEFORE any
# OpenGL import reads PYOPENGL_PLATFORM.
# NOTE(review): >20 CPU cores is used as a proxy for "headless server" —
# confirm this heuristic matches the deployment machines.
if os.cpu_count() > 20:
    os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
    # https://github.com/marian42/mesh_to_sdf/issues/13
    # https://pyrender.readthedocs.io/en/latest/install/index.html?highlight=ssh#getting-pyrender-working-with-osmesa
else:
    os.environ['PYOPENGL_PLATFORM'] = 'egl'  # default one was pyglet, which hangs sometime for unknown reason: https://github.com/marian42/mesh_to_sdf/issues/19;
import sys
import yaml
import logging
import logging.config
import time
import random
import math
import numpy as np
from numpy import array
import torch
import matplotlib.pyplot as plt
from src import INIT_TYPE, TEST_TYPE, GEN_TYPE
from src.sample_sdf import PointSampler
from src.sdf_net import SDFDecoder
from src.pointnet_encoder import PointNetEncoder
from src.cost_predictor import CostPredictor
from train_grasp import TrainGrasp
from src.dataset_grasp import TrainDataset
from eval_grasp import EvaluateGrasp
from util.misc import *
from util.mesh import *
class Runner:
def __init__(self, yaml_path, result_dir, device):
save__init__args(locals())
self.model_dir = result_dir + 'model/'
self.latent_img_dir = result_dir + 'latent_img/'
# Configure from yaml file
with open(yaml_path+'.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
self.config = config
self.voxel_resolution = config['voxel_resolution']
# always be one because of dataset design
self.batch_size = config['batch_size']
# NN params
self.dim_latent = config['dim_latent']
self.encoder_breadth = config['encoder_breadth']
self.decoder_breadth = config['decoder_breadth']
self.predictor_breadth = config['predictor_breadth']
# Set up networks, calculate number of params
self.encoder = PointNetEncoder(dim_latent=self.dim_latent,
breadth=self.encoder_breadth).to(device)
self.decoder = SDFDecoder(dim_latent=self.dim_latent,
breadth=self.decoder_breadth,
device=device).to(device)
self.predictor = CostPredictor(dim_latent=self.dim_latent,
dim_hidden=self.predictor_breadth).to(device)
print('Num of encoder parameters: %d' % sum(p.numel() for p in self.encoder.parameters() if p.requires_grad))
print('Num of decoder parameters: %d' % sum(p.numel() for p in self.decoder.parameters() if p.requires_grad))
print('Num of cost predictor parameters: %d' % sum(p.numel() for p in self.predictor.parameters() if p.requires_grad))
# Use one GPU
self.decoder_accessor = self.decoder
self.predictor_accessor = self.predictor
# Set up optimizer
self.optimizer = torch.optim.AdamW([
{'params': self.encoder.parameters(),
'lr': config['encoder_lr'],
'weight_decay': config['encoder_weight_decay']},
{'params': self.decoder.parameters(),
'lr': config['decoder_lr'],
'weight_decay': config['decoder_weight_decay']},
{'params': self.predictor.parameters(),
'lr': config['predictor_lr'],
'weight_decay': config['predictor_weight_decay']},
])
if config['decayLR_use']:
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.optimizer,
milestones=config['decayLR_milestones'],
gamma=config['decayLR_gamma'])
else:
self.scheduler = None
def create_dataset(self, env_dir_dict, embed_id_dir_dict,
num_sdf_available_per_obj, num_sdf_per_obj,
num_surface_per_obj, **kwargs):
'''
Create dataholder, to be updated once new distribution generated
# num_sdf_available_per_obj: number of sdf points for each object available before downsampled
# num_sdf_per_obj: number of sdf points for each object - target!
# num_surface_per_obj: number of surface points for each object (for pointnet encoder)
'''
self.train_data = TrainDataset(env_dir_dict,
embed_id_dir_dict,
num_sdf_available_per_obj,
num_sdf_per_obj,
num_surface_per_obj,
device='cpu')
self.train_dataloader = torch.utils.data.DataLoader(
self.train_data,
batch_size=self.batch_size,
shuffle=True,
drop_last=True,
pin_memory=True,
num_workers=4)
def embed(self, epoch, norm_loss_ratio, latent_all, label_all, num_sdf_per_obj, clamp_lip):
    """
    Run one training epoch over the dataloader and refresh object latents.

    Jointly trains encoder, decoder, and cost predictor with a combined
    loss: sdf reconstruction + reward regression + Lipschitz penalty +
    latent-norm penalty.

    Args:
        epoch: current epoch index (not referenced in this body).
        norm_loss_ratio: weight on the mean-squared-latent penalty.
        latent_all: array of latents for all objects; rows for objects seen
            this epoch are overwritten with freshly encoded latents.
        label_all: ground-truth labels/rewards indexed by object id.
        num_sdf_per_obj: sdf points per object, used to tile each latent so
            every sdf sample point gets its object's latent.
        clamp_lip: if None, penalize the sum of the two predictor layers'
            spectral norms directly; otherwise penalize the squared distance
            of that sum to clamp_lip*16 (clamping).

    Returns:
        Tuple of (epoch_loss, epoch_rec_loss, epoch_reg_loss,
        epoch_lip_loss, latent_all, pred_all) where the epoch losses are
        batch averages and pred_all holds all reward predictions.
    """
    epoch_loss = 0
    epoch_rec_loss = 0
    epoch_reg_loss = 0
    epoch_lip_loss = 0
    num_batch = 0
    # Switch NN mode
    self.encoder.train()
    self.decoder.train()
    self.predictor.train()
    # reduction='none' keeps per-element losses; averaged explicitly below
    l2 = torch.nn.MSELoss(reduction='none')
    # Save all the predictions for debugging
    pred_all = np.empty((0))
    # Run batches
    for batch_ind, data_batch in enumerate(self.train_dataloader):
        # Zero gradient
        self.optimizer.zero_grad(set_to_none=True)
        ###################### Extract data ######################
        batch_sdf, batch_surface, batch_obj_id_chosen = data_batch
        # Flatten to (num_obj * num_sdf_per_obj, 4): xyz + sdf value
        batch_sdf = batch_sdf.reshape(-1,4).to(self.device)
        batch_sdf_values = batch_sdf[:,-1]
        batch_sdf_points = batch_sdf[:,:3]
        batch_surface = batch_surface.to(self.device)
        batch_obj_id_chosen = batch_obj_id_chosen.squeeze(0)
        ###################### Encode ######################
        batch_latent = self.encoder.forward(batch_surface) # batch x latent
        ###################### Decode ######################
        batch_latent_all = batch_latent.repeat_interleave(num_sdf_per_obj, dim=0) # Assign latent to each point of the object
        batch_sdf_pred = self.decoder.forward(batch_sdf_points, batch_latent_all) # Decode each latent/point to get sdf predictions
        ###################### Rec loss ######################
        rec_loss = torch.mean((batch_sdf_pred - batch_sdf_values)**2)
        ###################### Reg loss ######################
        batch_reward_pred = self.predictor.forward(batch_latent).flatten()
        batch_label = torch.from_numpy(label_all[batch_obj_id_chosen]).float().to(self.device)
        reg_loss = torch.mean(l2(batch_reward_pred, batch_label))
        ###################### Lip loss ######################
        if clamp_lip is None:
            lip_loss = torch.linalg.norm(self.predictor_accessor.linear_hidden[0].weight, ord=2)+torch.linalg.norm(self.predictor_accessor.linear_out[0].weight, ord=2) # spectral norm
        else:
            lip_loss = (torch.linalg.norm(self.predictor_accessor.linear_hidden[0].weight, ord=2)+torch.linalg.norm(self.predictor_accessor.linear_out[0].weight, ord=2)-clamp_lip*16)**2 # clamping
        # Add reconstruction and regularization losses together
        batch_loss = rec_loss+\
                self.config['reg_loss_ratio']*reg_loss+\
                self.config['lip_loss_ratio']*lip_loss+\
                norm_loss_ratio*torch.mean(batch_latent**2)
        # Backward pass to get gradients
        batch_loss.backward()
        # Clip gradient if specified
        if self.config['gradientClip_use']:
            torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.config['gradientClip_thres'])
            torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.config['gradientClip_thres'])
            torch.nn.utils.clip_grad_norm_(self.predictor.parameters(), self.config['gradientClip_thres'])
        # Update weights using gradient
        self.optimizer.step()
        # Store loss
        epoch_loss += batch_loss.item()
        epoch_rec_loss += rec_loss.item()
        epoch_reg_loss += reg_loss.item()
        epoch_lip_loss += lip_loss.item()
        num_batch += 1
        # Update latents for all distributions
        latent_all[batch_obj_id_chosen] = batch_latent.detach().cpu().numpy()
        pred_all = np.concatenate((pred_all, batch_reward_pred.detach().cpu().numpy()))
    # Decay learning rate if specified
    if self.scheduler is not None:
        self.scheduler.step()
    # Get batch average loss
    # NOTE(review): assumes the dataloader yields at least one batch,
    # otherwise num_batch == 0 divides by zero — confirm upstream.
    epoch_loss /= num_batch
    epoch_rec_loss /= num_batch
    epoch_reg_loss /= num_batch
    epoch_lip_loss /= num_batch
    return epoch_loss, epoch_rec_loss, epoch_reg_loss, epoch_lip_loss, latent_all, pred_all
def get_predictor_lip(self):
    """Return the cost predictor's Lipschitz estimate (delegates to the predictor module's get_lip)."""
    return self.predictor_accessor.get_lip()
def encode_batch(self, surface_batch):
    """Encode a batch of surface point clouds into latent vectors.

    Args:
        surface_batch: numpy array, assumed shape
            N x num_surface_per_obj x 3.

    Returns:
        Tensor of shape N x latent_dim on self.device.
    """
    # NOTE(review): no torch.no_grad() here, so gradients flow through the
    # encoder — confirm callers rely on that before changing it.
    surface_tensor = torch.from_numpy(surface_batch).float().to(self.device)
    return self.encoder.forward(surface_tensor)  # N x latent_dim
def predict(self, latent):
    """Predict reward/cost for the given latent(s) using the cost predictor.

    Args:
        latent: numpy array or tensor of shape N x latent_dim.

    Returns:
        numpy array of shape (N,) with the predictions; no gradients flow.
    """
    if isinstance(latent, np.ndarray):
        latent = torch.from_numpy(latent).float().to(self.device)
    with torch.no_grad():
        prediction = self.predictor.forward(latent)
    return prediction.cpu().squeeze(1).numpy()
def adversarial(self, latent, eta=1.0, gamma=1.0, steps=10, target_drop=0.0):
    """
    Adversarially perturb latent using the cost predictor and evaluated label/cost. Following https://github.com/duchi-lab/certifiable-distributional-robustness/blob/master/attacks_tf.py
    Also see https://github.com/ricvolpi/generalize-unseen-domains/blob/master/model.py
    Only takes a single datapoint for now; tricky to get batch to work

    Args:
        latent: numpy latent vector to perturb (single datapoint).
        eta: gradient-ascent step size; adapted across outer iterations.
        gamma: weight pulling the perturbed latent back toward the
            original; adapted across outer iterations.
        steps: number of gradient-ascent steps per outer iteration.
        target_drop: desired drop in predicted reward; the outer loop
            re-tunes eta/gamma until the drop lands in
            [target_drop, target_drop*1.5].

    Returns:
        Tuple (perturbed latent as a numpy array of shape (1, latent_dim),
        latent_path_all with the latent after each step, shape
        (steps+1, latent_dim)).
    """
    l2 = torch.nn.MSELoss()
    # Leaf tensor w.r.t. which gradients are taken
    latent = torch.from_numpy(latent).float().to(self.device).requires_grad_().reshape(1,-1)
    latent_detach = latent.detach()
    # Gradient ascent
    max_num_itr = 10
    for _ in range(max_num_itr):
        # make a copy of the step size/penalty for this attempt
        eta_env = eta
        gamma_env = gamma
        latent_adv = latent.clone()
        ini_pred_reward = self.predictor.forward(latent_adv)
        latent_path_all = np.zeros((steps+1, latent.shape[1]))
        latent_path_all[0] = latent_adv.detach().cpu().numpy()
        # NOTE(review): assumes steps >= 1; with steps == 0 the variables
        # pred_reward/latent_adv below would be stale or undefined.
        for step in range(steps):
            pred_reward = self.predictor.forward(latent_adv) # reward
            # Maximize (-reward) while staying close to the original latent
            loss = -pred_reward - gamma_env*l2(latent_adv, latent_detach)
            grad = torch.autograd.grad(loss, latent_adv)[0] # returns a tuple of grads
            latent_adv += eta_env*grad
            # logging.info(f'step {step}, pred {pred_reward.item()}')
            latent_path_all[step+1] = latent_adv.detach().cpu().numpy()
        # Adapt eta/gamma based on how far the predicted reward dropped
        if (ini_pred_reward-pred_reward) > target_drop*1.5:
            eta *= 0.8 # too much perturbation
            gamma *= 2.0
        elif (ini_pred_reward-pred_reward) > target_drop:
            break # good
        else:
            eta *= 1.2 # too little perturbation
            gamma *= 0.5
    return latent_adv.detach().cpu().numpy(), latent_path_all
def generate(self, epoch, gen_dir, base_latent_all, eta, gamma, steps, target_drop=0.1, max_num_attempt=5):
    """
    Generate new objects by adversarially perturbing existing latents using the cost predictor.

    Sometimes a latent cannot generate a valid object, so up to
    max_num_attempt passes are made over the remaining objects; between
    passes the perturbation is made more conservative (smaller eta, larger
    gamma) so the perturbed latent stays closer to the original.

    Args:
        epoch: current epoch index (for logging only).
        gen_dir: output directory for generated stl/urdf files.
        base_latent_all: array of latents to perturb, one per new object.
        eta: gradient-ascent step size passed to adversarial().
        gamma: closeness-penalty weight passed to adversarial().
        steps: gradient-ascent steps per adversarial attempt.
        target_drop: desired drop in predicted reward.
        max_num_attempt: maximum passes over un-generated objects.

    Returns:
        Tuple (old_latent_all, new_latent_all, flags, height_all) where
        flags[i] == 1 marks objects that could not be generated and
        height_all holds the z-extent of each saved mesh.
    """
    num_new = len(base_latent_all)
    old_latent_all = base_latent_all
    new_latent_all = np.zeros((num_new, self.dim_latent))
    # flags[i] stays 1 until object i has been successfully generated
    flags = np.ones((num_new))
    height_all = np.zeros((num_new))
    # Consistency fix: read from self.config like the other methods,
    # instead of relying on the module-level `config` global.
    keep_concave_part = self.config['keep_concave_part']
    for _ in range(max_num_attempt):
        for env_ind in range(num_new):
            # Skip if already generated
            if flags[env_ind] < 1:
                continue
            # Adversarially perturb the base latent
            old_latent = base_latent_all[env_ind]
            new_latent, latent_path_all = self.adversarial(
                latent=old_latent,
                eta=eta, gamma=gamma, steps=steps,
                target_drop=target_drop)
            # Get mesh using decoder, possibly corrupt
            old_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(old_latent).float().to(self.device), voxel_resolution=self.voxel_resolution)
            new_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(new_latent).float().to(self.device), voxel_resolution=self.voxel_resolution)
            if new_mesh is None or old_mesh is None:
                print('Cannot generate from latent!')
                continue
            # Try processing; mesh processing can fail in many ways, so any
            # failure just skips this object for now (retried next pass).
            try:
                old_mesh = process_mesh(old_mesh,
                                        scale_down=True,
                                        smooth=False,  #!
                                        random_scale=False)
                new_mesh = process_mesh(new_mesh,
                                        scale_down=True,
                                        smooth=False,  #!
                                        random_scale=False)
                # Scale to original height
                new_mesh = match_mesh_height(new_mesh, old_mesh)
                # Export as decomposed stl and urdf - create new subdir for convex obj - for pybullet
                ensure_directory_hard(gen_dir + str(env_ind) + '/')
                convex_pieces = save_convex_urdf(new_mesh,
                                                 gen_dir,
                                                 env_ind,
                                                 mass=0.1,
                                                 keep_concave_part=keep_concave_part)
            except Exception:  # was a bare except; keep KeyboardInterrupt/SystemExit uncaught
                print('Cannot process generated!')
                continue
            if len(convex_pieces) > 20:
                print('Too concave!')
                continue
            #? Use decomposed parts as stl? avoid peculiarities when sampling sdf and causing reconstruction issue
            if keep_concave_part:  # Export as (un-decomposed) stl - for sdf
                save_mesh = new_mesh
            else:
                save_mesh = create_mesh_from_pieces(convex_pieces)
            save_mesh.export(gen_dir + str(env_ind) + '.stl')
            # Record the new latent; mark generated
            new_latent_all[env_ind] = new_latent
            flags[env_ind] = 0
            height_all[env_ind] = (save_mesh.bounds[1] - save_mesh.bounds[0])[2]
        # Quit if all objects perturbed
        if np.sum(flags) < 1e-3:
            break
        # Be more conservative next pass: find a closer latent
        eta /= 2
        gamma *= 2
        # steps = min(int(steps/2), 1)
        logging.info(f'Epoch {epoch} generate, double gamma locally')
    return old_latent_all, new_latent_all, flags, height_all
def visualize(self, old_latent_all, new_latent_all, num_random_obj=20):
    """
    Sample latent from all existing and visualize objects.

    Decodes random (old, new) latent pairs to meshes, saves a few as stl,
    and plots 2D cross sections with predicted rewards to a png grid.

    Args:
        old_latent_all: latents before adversarial perturbation.
        new_latent_all: latents after adversarial perturbation (same
            indexing as old_latent_all).
        num_random_obj: number of objects to visualize; the subplot grid
            below assumes 20.

    NOTE(review): this method reads the module-level global `epoch` for
    file naming (it is not a parameter) — confirm it is only called from
    the __main__ training loop where `epoch` is defined.
    """
    num_obj_generated = 0
    num_obj_attempt = 0
    obj_ind_all = random.sample(range(new_latent_all.shape[0]), k=num_random_obj)
    # Use subplots for all objects
    fig_obj, _ = plt.subplots(5, 4) # assume 20 rn
    while num_obj_generated < num_random_obj:
        # Sample more if used up
        if num_obj_attempt >= num_random_obj:
            obj_ind_all = random.sample(range(new_latent_all.shape[0]), k=num_random_obj)
            num_obj_attempt = 0
        # Extract sample
        old_obj = old_latent_all[obj_ind_all[num_obj_attempt]]
        new_obj = new_latent_all[obj_ind_all[num_obj_attempt]]
        # Try
        num_obj_attempt += 1
        # Reconstruct mesh from latent
        old_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(old_obj).float().to(self.device), voxel_resolution=self.voxel_resolution)
        new_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(new_obj).float().to(self.device), voxel_resolution=self.voxel_resolution)
        if old_mesh is None or new_mesh is None:
            print('Cannot generate sample!')
            continue
        # Center, orient, scale
        try:
            old_mesh = process_mesh(old_mesh,
                                    scale_down=True,
                                    smooth=False,
                                    random_scale=False)
            new_mesh = process_mesh(new_mesh,
                                    scale_down=True,
                                    smooth=False,
                                    random_scale=False)
        except:
            print('Cannot process sampled!')
            continue
        # Save mesh for inspection - but not decomposed; only the first 5
        if num_obj_generated < 5:
            old_mesh.export(self.latent_img_dir+str(epoch)+'_'+str(num_obj_generated)+'_old.stl')
            new_mesh.export(self.latent_img_dir+str(epoch)+'_'+str(num_obj_generated)+'_new.stl')
        # Predict rewards for annotation
        old_reward = self.predict(latent=old_obj.reshape(1,-1))[0]
        new_reward = self.predict(latent=new_obj.reshape(1,-1))[0]
        # Save image of 2D cross section through the centroid
        slice_2D_old, _ = old_mesh.section(plane_origin=old_mesh.centroid,
                                           plane_normal=[0,0,1]).to_planar()
        slice_2D_new, _ = new_mesh.section(plane_origin=new_mesh.centroid,
                                           plane_normal=[0,0,1]).to_planar()
        ax = fig_obj.axes[num_obj_generated]
        ax.set_aspect('equal')
        # Old outline in light gray, new outline in darker gray
        ax.scatter(slice_2D_old.vertices[:,0], slice_2D_old.vertices[:,1],
                   s=1,color='lightgray')
        ax.scatter(slice_2D_new.vertices[:,0], slice_2D_new.vertices[:,1],
                   s=2,color='gray')
        ax.text(x=0., y=0.01, s="{:.2f}".format(old_reward), fontsize=12, color='coral')
        ax.text(x=0., y=-0.01, s="{:.2f}".format(new_reward), fontsize=12, color='red')
        ax.axis('off')
        # Count
        num_obj_generated += 1
    plt.savefig(self.latent_img_dir+str(epoch)+'_random_obj.png')
    plt.close()
def save_model(self, dir):
    """Save encoder/decoder/predictor state dicts to dir (expects trailing separator)."""
    for name, module in (('encoder', self.encoder),
                         ('decoder', self.decoder),
                         ('predictor', self.predictor)):
        torch.save(module.state_dict(), dir + name + '.pt')
def load_model(self, dir):
    """Load encoder/decoder/predictor state dicts from dir onto self.device."""
    for name, module in (('encoder', self.encoder),
                         ('decoder', self.decoder),
                         ('predictor', self.predictor)):
        state = torch.load(dir + name + '.pt', map_location=self.device)
        module.load_state_dict(state)
def get_non_test_num_env_list(env_dict, dir_type_all=(INIT_TYPE, GEN_TYPE)):
    """Return the env counts of directories whose type is in dir_type_all.

    Args:
        env_dict: mapping of dir -> (env_id_list, heights, dir_type).
        dir_type_all: directory types to include; defaults to initial and
            generated (i.e., everything but test dirs).

    Returns:
        List with len(env_id_list) for each matching directory.
    """
    # Tuple default avoids the shared-mutable-default-argument pitfall;
    # membership testing works identically for callers passing lists.
    return [len(env_id_list)
            for env_id_list, _, dir_type in env_dict.values()
            if dir_type in dir_type_all]
if __name__ == '__main__':
# from IPython import embed; embed()
if os.cpu_count() > 20: # somehow on server, the default fork method does not work with pytorch, but works fine on desktop
import multiprocessing
multiprocessing.set_start_method('forkserver')
# Read config
yaml_file_name = sys.argv[1]
yaml_path = 'configs/'+yaml_file_name
with open(yaml_path+'.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# Fix seeds
seed = config['seed']
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = True # may speed up
# Hardware
cuda_idx = config['cuda_idx']
device = 'cuda:'+str(cuda_idx)
# Misc
num_eval_per_env = config['num_eval_per_env']
dim_latent = config['dim_latent']
norm_loss_ratio = config['norm_loss_ratio']
clamp_lip = config['clamp_lip']
# Data
initial_env_dir_list = config['initial_env_dir_list']
num_env_per_initial_dir = config['num_env_per_initial_dir']
test_env_dir_list = config['test_env_dir_list']
num_env_per_test_dir = config['num_env_per_test_dir']
# Generation (from latent)
num_epoch_per_gen = config['num_epoch_per_gen']
num_epoch_before_first_gen = config['num_epoch_before_first_gen']
num_env_per_gen = config['num_env_per_gen']
# Improving policy
num_env_per_retrain = config['num_env_per_retrain']
num_epoch_per_retrain = config['num_epoch_per_retrain']
num_epoch_before_first_retrain = config['num_epoch_before_first_retrain']
mu_list = config['mu_list']
mu = config['mu']
sigma = config['sigma']
retrain_args = config['retrain_args']
eval_args = config['eval_args']
# Adversarial (gradient ascent)
eta = config['eta']
gamma = config['gamma']
ga_steps = config['ga_steps']
target_drop_percentage = config['target_drop_percentage']
target_drop_percentage_rate = config['target_drop_percentage_rate']
# Env params
sdf_args = config['sdf_args']
# Initialize folders
data_parent_dir = config['data_parent_dir']
result_dir = 'result/'+yaml_file_name+'/'
model_dir = result_dir + 'runner_model/'
latent_img_dir = result_dir + 'latent_img/'
data_dir = data_parent_dir+yaml_file_name+'/'
ensure_directory(result_dir)
ensure_directory(model_dir)
ensure_directory(latent_img_dir)
ensure_directory(data_dir)
# Initialize dir dict: key is dir_path, value is a tuple of (1) id list and (2) type (0 for initial, 1 for test, 2 for gen)
env_dir_dict = {}
for env_dir in initial_env_dir_list:
height_all =list(np.load(env_dir+'dim.npy')[:num_env_per_initial_dir,2])
env_dir_dict[env_dir] = ([*range(num_env_per_initial_dir)], height_all, INIT_TYPE)
# Save a copy of configuration
with open(result_dir+'config.yaml', 'w') as f:
yaml.dump(config, f, sort_keys=False)
# Initialize evaluating policy (always cpu)
evaluator = EvaluateGrasp(initial_policy_path=None,
mu_list=mu_list, mu=mu, sigma=sigma, **eval_args)
# Initialize training policy
trainer = TrainGrasp(result_dir=result_dir, device=device,
mu=mu, sigma=sigma, **retrain_args)
# Initialize running env
runner = Runner(yaml_path=yaml_path, result_dir=result_dir, device=device)
# Initialize point sampler
point_sampler = PointSampler(**sdf_args)
# Training details to be recorded
train_loss_list = []
train_rec_loss_list = []
train_reg_loss_list = []
train_lip_loss_list = []
train_success_list = []
test_success_list = []
train_lip_list = []
# Save the latent and (ground-truth) label/reward of all images
latent_all = np.zeros((num_env_per_initial_dir*len(initial_env_dir_list),
dim_latent))
# Add test dir to dict
for env_dir in test_env_dir_list:
height_all = list(np.load(env_dir+'dim.npy')[:num_env_per_test_dir,2])
env_dir_dict[env_dir] = ([*range(num_env_per_test_dir)], height_all, TEST_TYPE)
# Name of saved training details
train_details_path = None
# Initialize counter
num_epoch_since_last_gen = 0
num_epoch_since_last_retrain = 0
num_env_gen = 0
num_dir_gen = 0
num_retrain = 0
# Logging
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(filename=result_dir+'log.txt',
level=logging.NOTSET,
format='%(process)d-%(levelname)s-%(asctime)s-%(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.info('start')
# Run
num_epoch = (config['num_retrain']-2)*num_epoch_per_retrain+num_epoch_before_first_retrain # minus 2 to account for retrain at epoch 0
epoch = 0
while epoch <= num_epoch:
# Record time for each epoch
epoch_start_time = | |
= -0.0000962359705999109
struct[0].Gy[1,31] = 0.000333217048202192
struct[0].Gy[2,2] = -1
struct[0].Gy[2,16] = 0.000966678295179781
struct[0].Gy[2,17] = -0.00000962359705999139
struct[0].Gy[2,18] = 0.00273067829517978
struct[0].Gy[2,19] = -0.00706562359705999
struct[0].Gy[2,20] = 0.000966678295179781
struct[0].Gy[2,21] = -0.00000962359705999139
struct[0].Gy[2,22] = 0.000966678295179781
struct[0].Gy[2,23] = -0.00000962359705999124
struct[0].Gy[2,24] = 0.000966678295179781
struct[0].Gy[2,25] = -0.00000962359705999132
struct[0].Gy[2,26] = 0.0194306782951798
struct[0].Gy[2,27] = -0.0150656235970600
struct[0].Gy[2,28] = 0.000966678295179781
struct[0].Gy[2,29] = -0.00000962359705999132
struct[0].Gy[2,30] = 0.000333217048202192
struct[0].Gy[2,31] = 0.0000962359705999109
struct[0].Gy[3,3] = -1
struct[0].Gy[3,16] = 0.00000962359705999139
struct[0].Gy[3,17] = 0.000966678295179781
struct[0].Gy[3,18] = 0.00706562359705999
struct[0].Gy[3,19] = 0.00273067829517978
struct[0].Gy[3,20] = 0.00000962359705999139
struct[0].Gy[3,21] = 0.000966678295179781
struct[0].Gy[3,22] = 0.00000962359705999124
struct[0].Gy[3,23] = 0.000966678295179781
struct[0].Gy[3,24] = 0.00000962359705999132
struct[0].Gy[3,25] = 0.000966678295179781
struct[0].Gy[3,26] = 0.0150656235970600
struct[0].Gy[3,27] = 0.0194306782951798
struct[0].Gy[3,28] = 0.00000962359705999132
struct[0].Gy[3,29] = 0.000966678295179781
struct[0].Gy[3,30] = -0.0000962359705999109
struct[0].Gy[3,31] = 0.000333217048202192
struct[0].Gy[4,4] = -1
struct[0].Gy[4,16] = 0.000966678295179781
struct[0].Gy[4,17] = -0.00000962359705999139
struct[0].Gy[4,18] = 0.000966678295179781
struct[0].Gy[4,19] = -0.00000962359705999139
struct[0].Gy[4,20] = 0.00273067829517978
struct[0].Gy[4,21] = -0.00706562359705999
struct[0].Gy[4,22] = 0.000966678295179781
struct[0].Gy[4,23] = -0.00000962359705999124
struct[0].Gy[4,24] = 0.000966678295179781
struct[0].Gy[4,25] = -0.00000962359705999132
struct[0].Gy[4,26] = 0.000966678295179781
struct[0].Gy[4,27] = -0.00000962359705999132
struct[0].Gy[4,28] = 0.0194306782951798
struct[0].Gy[4,29] = -0.0150656235970600
struct[0].Gy[4,30] = 0.000333217048202192
struct[0].Gy[4,31] = 0.0000962359705999109
struct[0].Gy[5,5] = -1
struct[0].Gy[5,16] = 0.00000962359705999139
struct[0].Gy[5,17] = 0.000966678295179781
struct[0].Gy[5,18] = 0.00000962359705999139
struct[0].Gy[5,19] = 0.000966678295179781
struct[0].Gy[5,20] = 0.00706562359705999
struct[0].Gy[5,21] = 0.00273067829517978
struct[0].Gy[5,22] = 0.00000962359705999124
struct[0].Gy[5,23] = 0.000966678295179781
struct[0].Gy[5,24] = 0.00000962359705999132
struct[0].Gy[5,25] = 0.000966678295179781
struct[0].Gy[5,26] = 0.00000962359705999132
struct[0].Gy[5,27] = 0.000966678295179781
struct[0].Gy[5,28] = 0.0150656235970600
struct[0].Gy[5,29] = 0.0194306782951798
struct[0].Gy[5,30] = -0.0000962359705999109
struct[0].Gy[5,31] = 0.000333217048202192
struct[0].Gy[6,6] = -1
struct[0].Gy[6,16] = 0.000333217048202192
struct[0].Gy[6,17] = 0.0000962359705999109
struct[0].Gy[6,18] = 0.000333217048202192
struct[0].Gy[6,19] = 0.0000962359705999109
struct[0].Gy[6,20] = 0.000333217048202192
struct[0].Gy[6,21] = 0.0000962359705999109
struct[0].Gy[6,22] = 0.000333217048202192
struct[0].Gy[6,23] = 0.0000962359705999110
struct[0].Gy[6,24] = 0.000333217048202192
struct[0].Gy[6,25] = 0.0000962359705999109
struct[0].Gy[6,26] = 0.000333217048202192
struct[0].Gy[6,27] = 0.0000962359705999109
struct[0].Gy[6,28] = 0.000333217048202192
struct[0].Gy[6,29] = 0.0000962359705999109
struct[0].Gy[6,30] = 0.00666782951797808
struct[0].Gy[6,31] = -0.000962359705999110
struct[0].Gy[7,7] = -1
struct[0].Gy[7,16] = -0.0000962359705999109
struct[0].Gy[7,17] = 0.000333217048202192
struct[0].Gy[7,18] = -0.0000962359705999109
struct[0].Gy[7,19] = 0.000333217048202192
struct[0].Gy[7,20] = -0.0000962359705999109
struct[0].Gy[7,21] = 0.000333217048202192
struct[0].Gy[7,22] = -0.0000962359705999110
struct[0].Gy[7,23] = 0.000333217048202192
struct[0].Gy[7,24] = -0.0000962359705999109
struct[0].Gy[7,25] = 0.000333217048202192
struct[0].Gy[7,26] = -0.0000962359705999109
struct[0].Gy[7,27] = 0.000333217048202192
struct[0].Gy[7,28] = -0.0000962359705999109
struct[0].Gy[7,29] = 0.000333217048202192
struct[0].Gy[7,30] = 0.000962359705999110
struct[0].Gy[7,31] = 0.00666782951797808
struct[0].Gy[8,8] = -1
struct[0].Gy[8,16] = 0.00273067829517978
struct[0].Gy[8,17] = -0.00706562359705999
struct[0].Gy[8,18] = 0.000966678295179781
struct[0].Gy[8,19] = -0.00000962359705999132
struct[0].Gy[8,20] = 0.000966678295179781
struct[0].Gy[8,21] = -0.00000962359705999132
struct[0].Gy[8,22] = 0.000966678295179781
struct[0].Gy[8,23] = -0.00000962359705999127
struct[0].Gy[8,24] = 0.00273067829517978
struct[0].Gy[8,25] = -0.00706562359705999
struct[0].Gy[8,26] = 0.000966678295179781
struct[0].Gy[8,27] = -0.00000962359705999127
struct[0].Gy[8,28] = 0.000966678295179781
struct[0].Gy[8,29] = -0.00000962359705999127
struct[0].Gy[8,30] = 0.000333217048202192
struct[0].Gy[8,31] = 0.0000962359705999109
struct[0].Gy[9,9] = -1
struct[0].Gy[9,16] = 0.00706562359705999
struct[0].Gy[9,17] = 0.00273067829517978
struct[0].Gy[9,18] = 0.00000962359705999132
struct[0].Gy[9,19] = 0.000966678295179781
struct[0].Gy[9,20] = 0.00000962359705999132
struct[0].Gy[9,21] = 0.000966678295179781
struct[0].Gy[9,22] = 0.00000962359705999127
struct[0].Gy[9,23] = 0.000966678295179781
struct[0].Gy[9,24] = 0.00706562359705999
struct[0].Gy[9,25] = 0.00273067829517978
struct[0].Gy[9,26] = 0.00000962359705999127
struct[0].Gy[9,27] = 0.000966678295179781
struct[0].Gy[9,28] = 0.00000962359705999127
struct[0].Gy[9,29] = 0.000966678295179781
struct[0].Gy[9,30] = -0.0000962359705999109
struct[0].Gy[9,31] = 0.000333217048202192
struct[0].Gy[10,10] = -1
struct[0].Gy[10,16] = 0.000966678295179781
struct[0].Gy[10,17] = -0.00000962359705999132
struct[0].Gy[10,18] = 0.00273067829517978
struct[0].Gy[10,19] = -0.00706562359705999
struct[0].Gy[10,20] = 0.000966678295179781
struct[0].Gy[10,21] = -0.00000962359705999132
struct[0].Gy[10,22] = 0.000966678295179781
struct[0].Gy[10,23] = -0.00000962359705999127
struct[0].Gy[10,24] = 0.000966678295179781
struct[0].Gy[10,25] = -0.00000962359705999127
struct[0].Gy[10,26] = 0.00273067829517978
struct[0].Gy[10,27] = -0.00706562359705999
struct[0].Gy[10,28] = 0.000966678295179781
struct[0].Gy[10,29] = -0.00000962359705999127
struct[0].Gy[10,30] = 0.000333217048202192
struct[0].Gy[10,31] = 0.0000962359705999109
struct[0].Gy[11,11] = -1
struct[0].Gy[11,16] = 0.00000962359705999132
struct[0].Gy[11,17] = 0.000966678295179781
struct[0].Gy[11,18] = 0.00706562359705999
struct[0].Gy[11,19] = 0.00273067829517978
struct[0].Gy[11,20] = 0.00000962359705999132
struct[0].Gy[11,21] = 0.000966678295179781
struct[0].Gy[11,22] = 0.00000962359705999127
struct[0].Gy[11,23] = 0.000966678295179781
struct[0].Gy[11,24] = 0.00000962359705999127
struct[0].Gy[11,25] = 0.000966678295179781
struct[0].Gy[11,26] = 0.00706562359705999
struct[0].Gy[11,27] = 0.00273067829517978
struct[0].Gy[11,28] = 0.00000962359705999127
struct[0].Gy[11,29] = 0.000966678295179781
struct[0].Gy[11,30] = -0.0000962359705999109
struct[0].Gy[11,31] = 0.000333217048202192
struct[0].Gy[12,12] = -1
struct[0].Gy[12,16] = 0.000966678295179781
struct[0].Gy[12,17] = -0.00000962359705999132
struct[0].Gy[12,18] = 0.000966678295179781
struct[0].Gy[12,19] = -0.00000962359705999132
struct[0].Gy[12,20] = 0.00273067829517978
struct[0].Gy[12,21] = -0.00706562359705999
struct[0].Gy[12,22] = 0.000966678295179781
struct[0].Gy[12,23] = -0.00000962359705999127
struct[0].Gy[12,24] = 0.000966678295179781
struct[0].Gy[12,25] = -0.00000962359705999127
struct[0].Gy[12,26] = 0.000966678295179781
struct[0].Gy[12,27] = -0.00000962359705999127
struct[0].Gy[12,28] = 0.00273067829517978
struct[0].Gy[12,29] = -0.00706562359705999
struct[0].Gy[12,30] = 0.000333217048202192
struct[0].Gy[12,31] = 0.0000962359705999109
struct[0].Gy[13,13] = -1
struct[0].Gy[13,16] = 0.00000962359705999132
struct[0].Gy[13,17] = 0.000966678295179781
struct[0].Gy[13,18] = 0.00000962359705999132
struct[0].Gy[13,19] = 0.000966678295179781
struct[0].Gy[13,20] = 0.00706562359705999
struct[0].Gy[13,21] = 0.00273067829517978
struct[0].Gy[13,22] = 0.00000962359705999127
struct[0].Gy[13,23] = 0.000966678295179781
struct[0].Gy[13,24] = 0.00000962359705999127
struct[0].Gy[13,25] = 0.000966678295179781
struct[0].Gy[13,26] = 0.00000962359705999127
struct[0].Gy[13,27] = 0.000966678295179781
struct[0].Gy[13,28] = 0.00706562359705999
struct[0].Gy[13,29] = 0.00273067829517978
struct[0].Gy[13,30] = -0.0000962359705999109
struct[0].Gy[13,31] = 0.000333217048202192
struct[0].Gy[14,14] = -1
struct[0].Gy[14,16] = 0.000966678295179781
struct[0].Gy[14,17] = -0.00000962359705999126
struct[0].Gy[14,18] = 0.000966678295179781
struct[0].Gy[14,19] = -0.00000962359705999126
struct[0].Gy[14,20] = 0.000966678295179781
struct[0].Gy[14,21] = -0.00000962359705999126
struct[0].Gy[14,22] = 0.000966678295179781
struct[0].Gy[14,23] = -0.00000962359705999120
struct[0].Gy[14,24] = 0.000966678295179781
struct[0].Gy[14,25] = -0.00000962359705999122
struct[0].Gy[14,26] = 0.000966678295179781
struct[0].Gy[14,27] = -0.00000962359705999122
struct[0].Gy[14,28] = 0.000966678295179781
struct[0].Gy[14,29] = -0.00000962359705999122
struct[0].Gy[14,30] = 0.000333217048202192
struct[0].Gy[14,31] = 0.0000962359705999110
struct[0].Gy[15,15] = -1
struct[0].Gy[15,16] = 0.00000962359705999126
struct[0].Gy[15,17] = 0.000966678295179781
struct[0].Gy[15,18] = 0.00000962359705999126
struct[0].Gy[15,19] = 0.000966678295179781
struct[0].Gy[15,20] = 0.00000962359705999126
struct[0].Gy[15,21] = 0.000966678295179781
struct[0].Gy[15,22] = 0.00000962359705999120
struct[0].Gy[15,23] = 0.000966678295179781
struct[0].Gy[15,24] = 0.00000962359705999122
struct[0].Gy[15,25] = 0.000966678295179781
struct[0].Gy[15,26] = 0.00000962359705999122
struct[0].Gy[15,27] = 0.000966678295179781
struct[0].Gy[15,28] = 0.00000962359705999122
struct[0].Gy[15,29] = 0.000966678295179781
struct[0].Gy[15,30] = -0.0000962359705999110
struct[0].Gy[15,31] = 0.000333217048202192
struct[0].Gy[16,8] = i_B2_a_r
struct[0].Gy[16,9] = i_B2_a_i
struct[0].Gy[16,14] = -i_B2_a_r
struct[0].Gy[16,15] = -i_B2_a_i
struct[0].Gy[16,16] = v_B2_a_r - v_B2_n_r
struct[0].Gy[16,17] = v_B2_a_i - v_B2_n_i
struct[0].Gy[17,10] = i_B2_b_r
struct[0].Gy[17,11] = i_B2_b_i
struct[0].Gy[17,14] = -i_B2_b_r
struct[0].Gy[17,15] = -i_B2_b_i
struct[0].Gy[17,18] = v_B2_b_r - v_B2_n_r
struct[0].Gy[17,19] = v_B2_b_i - v_B2_n_i
struct[0].Gy[18,12] = i_B2_c_r
struct[0].Gy[18,13] = i_B2_c_i
struct[0].Gy[18,14] = -i_B2_c_r
struct[0].Gy[18,15] = -i_B2_c_i
struct[0].Gy[18,20] = v_B2_c_r - v_B2_n_r
struct[0].Gy[18,21] = v_B2_c_i - v_B2_n_i
struct[0].Gy[19,8] = -i_B2_a_i
struct[0].Gy[19,9] = i_B2_a_r
struct[0].Gy[19,14] = i_B2_a_i
struct[0].Gy[19,15] = -i_B2_a_r
struct[0].Gy[19,16] = v_B2_a_i - v_B2_n_i
struct[0].Gy[19,17] = -v_B2_a_r + v_B2_n_r
struct[0].Gy[20,10] = -i_B2_b_i
struct[0].Gy[20,11] = i_B2_b_r
struct[0].Gy[20,14] = i_B2_b_i
struct[0].Gy[20,15] = -i_B2_b_r
struct[0].Gy[20,18] = v_B2_b_i - v_B2_n_i
struct[0].Gy[20,19] = -v_B2_b_r + v_B2_n_r
struct[0].Gy[21,12] = -i_B2_c_i
struct[0].Gy[21,13] = i_B2_c_r
struct[0].Gy[21,14] = i_B2_c_i
struct[0].Gy[21,15] = -i_B2_c_r
struct[0].Gy[21,20] = v_B2_c_i - v_B2_n_i
struct[0].Gy[21,21] = -v_B2_c_r + v_B2_n_r
struct[0].Gy[22,16] = 1
struct[0].Gy[22,18] = 1
struct[0].Gy[22,20] = 1
struct[0].Gy[22,22] = 1
struct[0].Gy[23,17] = 1
struct[0].Gy[23,19] = 1
struct[0].Gy[23,21] = 1
struct[0].Gy[23,23] = 1
struct[0].Gy[24,0] = i_B3_a_r
struct[0].Gy[24,1] = i_B3_a_i
struct[0].Gy[24,6] = -i_B3_a_r
struct[0].Gy[24,7] = -i_B3_a_i
struct[0].Gy[24,24] = v_B3_a_r - v_B3_n_r
struct[0].Gy[24,25] = v_B3_a_i - v_B3_n_i
struct[0].Gy[25,2] = i_B3_b_r
struct[0].Gy[25,3] = i_B3_b_i
struct[0].Gy[25,6] = -i_B3_b_r
struct[0].Gy[25,7] = -i_B3_b_i
struct[0].Gy[25,26] = v_B3_b_r - v_B3_n_r
struct[0].Gy[25,27] = v_B3_b_i - v_B3_n_i
struct[0].Gy[26,4] = i_B3_c_r
struct[0].Gy[26,5] = i_B3_c_i
struct[0].Gy[26,6] = -i_B3_c_r
struct[0].Gy[26,7] = -i_B3_c_i
struct[0].Gy[26,28] = v_B3_c_r - v_B3_n_r
struct[0].Gy[26,29] = v_B3_c_i - v_B3_n_i
struct[0].Gy[27,0] = -i_B3_a_i
struct[0].Gy[27,1] = i_B3_a_r
struct[0].Gy[27,6] = i_B3_a_i
struct[0].Gy[27,7] = -i_B3_a_r
struct[0].Gy[27,24] = v_B3_a_i - v_B3_n_i
struct[0].Gy[27,25] = -v_B3_a_r + v_B3_n_r
struct[0].Gy[28,2] = -i_B3_b_i
struct[0].Gy[28,3] = i_B3_b_r
struct[0].Gy[28,6] = i_B3_b_i
struct[0].Gy[28,7] = -i_B3_b_r
struct[0].Gy[28,26] = v_B3_b_i - v_B3_n_i
struct[0].Gy[28,27] = -v_B3_b_r + v_B3_n_r
struct[0].Gy[29,4] = -i_B3_c_i
struct[0].Gy[29,5] = i_B3_c_r
struct[0].Gy[29,6] = i_B3_c_i
struct[0].Gy[29,7] = -i_B3_c_r
struct[0].Gy[29,28] = v_B3_c_i - v_B3_n_i
struct[0].Gy[29,29] = -v_B3_c_r + v_B3_n_r
struct[0].Gy[30,24] = 1
struct[0].Gy[30,26] = 1
struct[0].Gy[30,28] = 1
struct[0].Gy[30,30] = 1
struct[0].Gy[31,25] = 1
struct[0].Gy[31,27] = 1
struct[0].Gy[31,29] = 1
struct[0].Gy[31,31] = 1
if mode > 12:
struct[0].Fu[0,22] = 1
struct[0].Gu[16,10] = 1
struct[0].Gu[17,12] = 1
struct[0].Gu[18,14] = 1
struct[0].Gu[19,11] = 1
struct[0].Gu[20,13] = 1
struct[0].Gu[21,15] = 1
struct[0].Gu[24,16] = 1
struct[0].Gu[25,18] = 1
struct[0].Gu[26,20] = 1
struct[0].Gu[27,17] = 1
struct[0].Gu[28,19] = 1
struct[0].Gu[29,21] = 1
struct[0].Hx[0,0] = 1
@numba.njit(cache=True)
def ini(struct,mode):
# Parameters:
a = struct[0].a
# Inputs:
v_B1_a_r = struct[0].v_B1_a_r
v_B1_a_i = struct[0].v_B1_a_i
v_B1_b_r = struct[0].v_B1_b_r
v_B1_b_i = struct[0].v_B1_b_i
v_B1_c_r = struct[0].v_B1_c_r
v_B1_c_i = struct[0].v_B1_c_i
i_B3_n_r = struct[0].i_B3_n_r
i_B3_n_i = struct[0].i_B3_n_i
i_B2_n_r = struct[0].i_B2_n_r
i_B2_n_i = struct[0].i_B2_n_i
p_B2_a = struct[0].p_B2_a
q_B2_a = struct[0].q_B2_a
p_B2_b = struct[0].p_B2_b
q_B2_b = struct[0].q_B2_b
p_B2_c = struct[0].p_B2_c
q_B2_c = struct[0].q_B2_c
p_B3_a = struct[0].p_B3_a
q_B3_a = struct[0].q_B3_a
p_B3_b = struct[0].p_B3_b
q_B3_b = struct[0].q_B3_b
p_B3_c = struct[0].p_B3_c
q_B3_c = struct[0].q_B3_c
u_dummy = struct[0].u_dummy
# Dynamical states:
x_dummy = struct[0].x[0,0]
# Algebraic states:
v_B3_a_r = struct[0].y_ini[0,0]
v_B3_a_i = struct[0].y_ini[1,0]
v_B3_b_r = struct[0].y_ini[2,0]
v_B3_b_i = struct[0].y_ini[3,0]
v_B3_c_r = struct[0].y_ini[4,0]
v_B3_c_i = struct[0].y_ini[5,0]
v_B3_n_r = struct[0].y_ini[6,0]
v_B3_n_i = struct[0].y_ini[7,0]
v_B2_a_r = struct[0].y_ini[8,0]
v_B2_a_i = struct[0].y_ini[9,0]
v_B2_b_r = struct[0].y_ini[10,0]
v_B2_b_i = struct[0].y_ini[11,0]
v_B2_c_r = struct[0].y_ini[12,0]
v_B2_c_i = struct[0].y_ini[13,0]
v_B2_n_r = struct[0].y_ini[14,0]
v_B2_n_i = struct[0].y_ini[15,0]
i_B2_a_r = struct[0].y_ini[16,0]
i_B2_a_i = struct[0].y_ini[17,0]
i_B2_b_r = struct[0].y_ini[18,0]
i_B2_b_i = struct[0].y_ini[19,0]
i_B2_c_r = struct[0].y_ini[20,0]
i_B2_c_i = struct[0].y_ini[21,0]
i_B2_n_r = struct[0].y_ini[22,0]
i_B2_n_i = struct[0].y_ini[23,0]
i_B3_a_r = struct[0].y_ini[24,0]
i_B3_a_i = struct[0].y_ini[25,0]
i_B3_b_r = struct[0].y_ini[26,0]
i_B3_b_i = struct[0].y_ini[27,0]
i_B3_c_r = struct[0].y_ini[28,0]
i_B3_c_i = struct[0].y_ini[29,0]
i_B3_n_r = struct[0].y_ini[30,0]
i_B3_n_i = struct[0].y_ini[31,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = u_dummy - x_dummy
# Algebraic equations:
if | |
def fit_and_test_model(clf, model_name, model_desc, hyperparams, feats,
        inspect_feat_coeffs=False):
    """Fit and test model.

    Args:
        clf: classifier model object
        model_name: short string naming model
        model_desc: string describing model
        hyperparams: list [p] of hyperparameters where each p is a string
            and clf.p gives the value chosen by the hyperparameter search
        feats: input features type
        inspect_feat_coeffs: if set, save the feature coefficients with
            their descriptions and print the top ones (by absolute value)

    Returns:
        dict giving metrics for the best choice of model hyperparameters
    """
    # Fit model
    clf.fit(x_train[feats], y_train[feats])

    if inspect_feat_coeffs:
        # Combine coefficients with their descriptions
        if hasattr(clf, 'coef_'):
            coeffs = clf.coef_
        else:
            # clf is likely a CV (e.g., RandomizedSearchCV) object
            coeffs = clf.best_estimator_.coef_
        # coef_ for a binary classifier has a single row; flatten it so
        # each entry lines up with one feature description
        if len(coeffs) == 1:
            coeffs = coeffs[0]
        coef_descriptions = parsers[feats].baseline_descriptions
        assert len(coeffs) == len(coef_descriptions)
        cd = zip(coeffs, coef_descriptions)
        # Sort (reverse) by absolute value of coefficient
        cd_sorted = sorted(cd, key=lambda x: abs(x[0]), reverse=True)
        # Print top 10 coefficients with descriptions
        print('Top feature coefficients:')
        for coeff, description in cd_sorted[:10]:
            print(' ', description, ':', coeff)
        # Save all feature coefficients to a file, if set
        if feat_coeffs_out_tsv_f is not None:
            for coeff, description in cd_sorted:
                row = [model_name, feats, description, coeff]
                line = '\t'.join(str(c) for c in row)
                feat_coeffs_out_tsv_f.write(line + '\n')

    # Test model
    y_pred = clf.predict(x_test[feats])
    # NOTE(review): sklearn classifiers' predict() returns hard 0/1 labels,
    # which pass through this threshold unchanged; the 0.5 cut-off only
    # matters for models whose predict() returns scores — confirm intent
    y_pred_class = [0 if y < 0.5 else 1 for y in y_pred]

    # Compute metrics (for auROC and auPR)
    # This initially performed this calculation with *both* TensorFlow
    # and scikit-learn to report both results. However, it seems that
    # using TensorFlow for this calculation sometimes leads to a strange
    # crash caused by the GPU running out of memory (possibly because
    # there are multiple processes (jobs) for the hyperparameter search and
    # TensorFlow tries to use all of the GPU's memory). The results between
    # scikit-learn and TensorFlow were virtually identical for auROC, and
    # where very close for auPR (reporting avg. precision along with auPR
    # should alleviate calculation concerns).
    auc_roc_sk = auc_roc_f(y_test[feats], y_pred)
    auc_pr_sk = auc_pr_f(y_test[feats], y_pred)
    avg_prec = sklearn.metrics.average_precision_score(y_test[feats],
        y_pred)
    accuracy = sklearn.metrics.accuracy_score(y_test[feats], y_pred_class)

    # Print metrics
    print('#'*20)
    print("Classification with {}".format(model_desc))
    if type(hyperparams) is list:
        # Explicit list of hyperparameter names: read each chosen value
        # off of the fitted model object
        for p in hyperparams:
            print(" best {} = {}".format(p, getattr(clf, p)))
    else:
        # Otherwise hyperparams is a fitted CV search object
        print(" best params = {}".format(hyperparams.best_params_))
    print(" auROC (SK) = {}".format(auc_roc_sk))
    print(" auPR (SK) = {}".format(auc_pr_sk))
    print(" Avg. prec = {}".format(avg_prec))
    print(" Accuracy = {}".format(accuracy))
    print('#'*20)

    # '1_minus_auc-roc' is presumably provided so callers that minimize a
    # loss can use it directly — confirm against callers
    return {'auc-roc': auc_roc_sk, 'auc-pr': auc_pr_sk,
            'avg-prec': avg_prec, 'accuracy': accuracy,
            '1_minus_auc-roc': 1.0-auc_roc_sk}
# Logistic regression (no regularization)
def logit(feats):
    """Fit and evaluate an unregularized logistic regression classifier.

    Args:
        feats: input features type

    Returns:
        dict of test metrics from fit_and_test_model()
    """
    # NOTE(review): penalty='none' was removed in scikit-learn 1.2 in
    # favor of penalty=None — confirm the pinned sklearn version
    clf = sklearn.linear_model.LogisticRegression(penalty='none',
        class_weight=class_weight, solver='lbfgs',
        max_iter=100) # no CV because there are no hyperparameters
    # Fixed typo in the printed description: 'Logisitic' -> 'Logistic'
    return fit_and_test_model(clf, 'logit', 'Logistic regression',
        hyperparams=[], feats=feats,
        inspect_feat_coeffs=True)
# L1 logistic regression
def l1_logit(feats):
    """Fit and evaluate an L1-regularized logistic regression classifier,
    tuning its regularization strength via randomized search."""
    base_model = sklearn.linear_model.LogisticRegression(
        penalty='l1', solver='saga', class_weight=class_weight,
        tol=0.0001, max_iter=100)
    search = random_search_cv('l1_logit', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'l1_logit', 'L1 logistic regression',
        hyperparams=search, feats=feats, inspect_feat_coeffs=True)
# L2 logistic regression
def l2_logit(feats):
    """Fit and evaluate an L2-regularized logistic regression classifier,
    tuning its regularization strength via randomized search."""
    base_model = sklearn.linear_model.LogisticRegression(
        penalty='l2', solver='lbfgs', class_weight=class_weight,
        tol=0.0001, max_iter=100)
    search = random_search_cv('l2_logit', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'l2_logit', 'L2 logistic regression',
        hyperparams=search, feats=feats, inspect_feat_coeffs=True)
# Elastic net (L1+L2 logistic regression)
def l1l2_logit(feats):
    """Fit and evaluate an elastic-net logistic regression classifier,
    tuning its hyperparameters via randomized search."""
    base_model = sklearn.linear_model.LogisticRegression(
        penalty='elasticnet', solver='saga', class_weight=class_weight,
        tol=0.0001, max_iter=100)
    search = random_search_cv('l1l2_logit', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'l1l2_logit', 'L1+L2 logistic regression',
        hyperparams=search, feats=feats, inspect_feat_coeffs=True)
# Gradient-boosted classification trees
def gbt(feats):
    """Fit and evaluate gradient-boosted classification trees, tuning
    hyperparameters via randomized search."""
    # GradientBoostingClassifier does not accept a class_weight argument,
    # so class weighting is not applied for this model
    base_model = sklearn.ensemble.GradientBoostingClassifier(
        loss='deviance', tol=0.001)
    search = random_search_cv('gbt', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'gbt', 'Gradient boosting classification',
        hyperparams=search, feats=feats)
# Random forest classification
def rf(feats):
    """Fit and evaluate a random forest classifier, tuning hyperparameters
    via randomized search."""
    base_model = sklearn.ensemble.RandomForestClassifier(
        class_weight=class_weight, criterion='gini')
    search = random_search_cv('rf', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'rf', 'Random forest classification',
        hyperparams=search, feats=feats)
# SVM
def svm(feats):
    """Fit and evaluate a linear support vector machine, tuning
    hyperparameters via randomized search."""
    # sklearn's SVC has a fit time that is quadratic in the number of
    # samples, so use the faster LinearSVC instead; the trade-off is that
    # it does not support higher-dimensional kernels
    base_model = sklearn.svm.LinearSVC(tol=0.0001,
        class_weight=class_weight)
    search = random_search_cv('svm', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'svm', 'SVM',
        hyperparams=search, feats=feats)
# MLP
def mlp(feats):
    """Fit and evaluate a multilayer perceptron (fnn module), tuning
    hyperparameters via randomized search."""
    base_model = fnn.MultilayerPerceptron(parsers[feats].context_nt,
        regression=False, class_weight=class_weight)
    search = random_search_cv('mlp', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'mlp', 'Multilayer perceptron',
        hyperparams=search, feats=feats)
# LSTM
def lstm(feats):
    """Fit and evaluate an LSTM (rnn module), tuning hyperparameters via
    randomized search."""
    base_model = rnn.LSTM(parsers[feats].context_nt,
        regression=False, class_weight=class_weight)
    search = random_search_cv('lstm', base_model, cv(feats), scorer)
    return fit_and_test_model(
        search, 'lstm', 'LSTM',
        hyperparams=search, feats=feats)
# Run every requested model on each of its input feature types and
# collect the test metrics.
metrics_for_models = {}
for model_name in models_to_use:
    metrics_for_models[model_name] = {}
    for feats in input_feats[model_name]:
        print(("Running and evaluating model '%s' with input feature '%s'") %
            (model_name, feats))
        # Dispatch by name to the nested model function defined above
        # (e.g., logit, rf) via locals()
        model_fn = locals()[model_name]
        metrics_for_models[model_name][feats] = model_fn(feats)
return metrics_for_models
def regress(x_train, y_train, x_test, y_test,
parsers,
num_inner_splits=5,
scoring_method='rho',
models_to_use=None,
feat_coeffs_out_tsv_f=None):
"""Perform regression.
Test data is used for evaluating the model with the best choice of
hyperparameters, after refitting across *all* the train data.
Args:
x_{train,test}: input data for train/test
y_{train,test}: output labels for train/test
num_inner_splits: number of splits for cross-validation
parsers: parse_data parsers to use for splitting data
scoring_method: method to use for scoring test results; 'mse' (mean
squared error) or 'rho' (Spearman's rank correlation)
models_to_use: list of models to test; if None, test all
feat_coeffs_out_tsv_f: if set, file handler to which to write
coefficients for each feature (linear models only; only for
the best estimator after hyperparameter search)
Returns:
dict {model: {input feats: metrics on test data for best choice of
hyperparameters for model}}
"""
# Check models_to_use
# NOTE(review): assert statements are stripped under `python -O`; raise a
# ValueError instead if this validation must always run
all_models = ['lr', 'l1_lr', 'l2_lr', 'l1l2_lr', 'gbt', 'rf', 'mlp', 'lstm']
if models_to_use is None:
    models_to_use = all_models
assert set(models_to_use).issubset(all_models)

# Set the input feats to use for different models
# Use the same choice for all models *except* lstm, which should be in a
# series form where each time step corresponds to a position
input_feats = {}
for m in all_models:
    if m == 'lstm':
        input_feats[m] = ['onehot']
    else:
        input_feats[m] = ['onehot-flat', 'onehot-simple', 'handcrafted',
            'combined']

# With models, perform cross-validation to determine hyperparameters
# Most of the built-in cross-validators find the best choice based on
# R^2; some of them do not support a custom scoring function via a
# `scoring=...` argument. So instead wrap the regression with a
# GridSearchCV object, which does support a custom scoring metric. Use
# spearman rank correlation coefficient for this.
def cv(feats):
    # Yield train/validation index splits over the training data for the
    # inner cross-validation, stratified by position (stratify_by_pos).
    return parsers[feats].split(x_train[feats], y_train[feats],
        num_inner_splits, stratify_by_pos=True, yield_indices=True)
def rho_f(y, y_pred):
    """Return Spearman's rank correlation between true and predicted values."""
    # spearmanr returns (correlation, p-value); only the correlation is needed
    return scipy.stats.spearmanr(y, y_pred)[0]
# Wrap the metrics as sklearn scorers so the CV search can rank
# hyperparameter choices by Spearman's rho or by MSE.
rho_scorer = sklearn.metrics.make_scorer(rho_f,
    greater_is_better=True)
# greater_is_better=False: lower MSE is better, so sklearn negates the
# score internally to keep "higher is better" for the search
mse_scorer = sklearn.metrics.make_scorer(
    sklearn.metrics.mean_squared_error,
    greater_is_better=False)
if scoring_method == 'mse':
    scorer = mse_scorer
elif scoring_method == 'rho':
    scorer = rho_scorer
else:
    raise ValueError("Unknown scoring method %s" % scoring_method)
def fit_and_test_model(reg, model_name, model_desc, hyperparams, feats,
inspect_feat_coeffs=False):
"""Fit and test model.
Args:
reg: regression model object
model_name: short string naming model
model_desc: string describing model
hyperparams: list [p] of hyperparameters where each p is a string
and reg.p gives the value chosen by the hyperparameter search
feats: input features type
inspect_feat_coeffs: if set, save the feature coefficients with
their descriptions and print the top ones (by absolute value)
Returns:
dict giving metrics for the best choice of model hyperparameters
"""
# Fit model
reg.fit(x_train[feats], y_train[feats])
if inspect_feat_coeffs:
# Combine coefficients with their descriptions
if hasattr(reg, 'coef_'):
coeffs = reg.coef_
else:
# ref is likely a CV (e.g., RandomizedSearchCV) object
coeffs = reg.best_estimator_.coef_
if len(coeffs) == 1:
coeffs = coeffs[0]
coef_descriptions = parsers[feats].baseline_descriptions
assert len(coeffs) == len(coef_descriptions)
cd = zip(coeffs, coef_descriptions)
# Sort (reverse) by absolute value of coefficient
cd_sorted = sorted(cd, key=lambda x: abs(x[0]), reverse=True)
# Print top 10 coefficients with descriptions
print('Top feature coefficients:')
for coeff, description in cd_sorted[:10]:
print(' ', description, ':', coeff)
# Save all feature coefficients to a file, if set
if feat_coeffs_out_tsv_f is not None:
for coeff, description in cd_sorted:
row = [model_name, feats, description, coeff]
line = '\t'.join(str(c) for c in row)
feat_coeffs_out_tsv_f.write(line + '\n')
# Test model
y_pred = reg.predict(x_test[feats])
# Compute metrics
mse = sklearn.metrics.mean_squared_error(y_test[feats], y_pred)
mae = sklearn.metrics.mean_absolute_error(y_test[feats], y_pred)
R2 = sklearn.metrics.r2_score(y_test[feats], y_pred)
r, _ = scipy.stats.pearsonr(y_test[feats], y_pred)
rho, _ = scipy.stats.spearmanr(y_test[feats], y_pred)
# Note that R2 does not necessarily equal r^2 here. The value R2
# is computed by definition of R^2 (1 minus (residual sum of
# squares)/(total sum of squares)) from the true vs. predicted
| |
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mailslurp_client.configuration import Configuration
class WebhookResultDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each Python attribute name to the OpenAPI type of its value.
openapi_types = {
    'created_at': 'datetime',
    'http_method': 'str',
    'id': 'str',
    'inbox_id': 'str',
    'message_id': 'str',
    'redrive_id': 'str',
    'response_body_extract': 'str',
    'response_status': 'int',
    'response_time_millis': 'int',
    'result_type': 'str',
    'seen': 'bool',
    'updated_at': 'datetime',
    'user_id': 'str',
    'webhook_event': 'str',
    'webhook_id': 'str',
    'webhook_url': 'str'
}

# Maps each Python attribute name to its key in the JSON payload.
attribute_map = {
    'created_at': 'createdAt',
    'http_method': 'httpMethod',
    'id': 'id',
    'inbox_id': 'inboxId',
    'message_id': 'messageId',
    'redrive_id': 'redriveId',
    'response_body_extract': 'responseBodyExtract',
    'response_status': 'responseStatus',
    'response_time_millis': 'responseTimeMillis',
    'result_type': 'resultType',
    'seen': 'seen',
    'updated_at': 'updatedAt',
    'user_id': 'userId',
    'webhook_event': 'webhookEvent',
    'webhook_id': 'webhookId',
    'webhook_url': 'webhookUrl'
}
def __init__(self, created_at=None, http_method=None, id=None, inbox_id=None, message_id=None, redrive_id=None, response_body_extract=None, response_status=None, response_time_millis=None, result_type=None, seen=None, updated_at=None, user_id=None, webhook_event=None, webhook_id=None, webhook_url=None, local_vars_configuration=None):  # noqa: E501
    """WebhookResultDto - a model defined in OpenAPI"""  # noqa: E501
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    # Backing fields for the property setters defined below.
    self._created_at = None
    self._http_method = None
    self._id = None
    self._inbox_id = None
    self._message_id = None
    self._redrive_id = None
    self._response_body_extract = None
    self._response_status = None
    self._response_time_millis = None
    self._result_type = None
    self._seen = None
    self._updated_at = None
    self._user_id = None
    self._webhook_event = None
    self._webhook_id = None
    self._webhook_url = None
    self.discriminator = None

    # Fields whose setters reject None (when client-side validation is on)
    # are assigned unconditionally; optional fields are only assigned when
    # a value was provided, so their setters are never handed None here.
    self.created_at = created_at
    self.http_method = http_method
    if id is not None:
        self.id = id
    self.inbox_id = inbox_id
    self.message_id = message_id
    if redrive_id is not None:
        self.redrive_id = redrive_id
    if response_body_extract is not None:
        self.response_body_extract = response_body_extract
    if response_status is not None:
        self.response_status = response_status
    self.response_time_millis = response_time_millis
    if result_type is not None:
        self.result_type = result_type
    if seen is not None:
        self.seen = seen
    self.updated_at = updated_at
    self.user_id = user_id
    self.webhook_event = webhook_event
    self.webhook_id = webhook_id
    self.webhook_url = webhook_url
@property
def created_at(self):
    """Gets the created_at of this WebhookResultDto. # noqa: E501

    :return: The created_at of this WebhookResultDto. # noqa: E501
    :rtype: datetime
    """
    return self._created_at

@created_at.setter
def created_at(self, created_at):
    """Sets the created_at of this WebhookResultDto.

    :param created_at: The created_at of this WebhookResultDto. # noqa: E501
    :type: datetime
    """
    # Required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and created_at is None: # noqa: E501
        raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501

    self._created_at = created_at

@property
def http_method(self):
    """Gets the http_method of this WebhookResultDto. # noqa: E501

    :return: The http_method of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._http_method

@http_method.setter
def http_method(self, http_method):
    """Sets the http_method of this WebhookResultDto.

    :param http_method: The http_method of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Required field; also restricted to the standard HTTP verbs below.
    if self.local_vars_configuration.client_side_validation and http_method is None: # noqa: E501
        raise ValueError("Invalid value for `http_method`, must not be `None`") # noqa: E501
    allowed_values = ["GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS", "TRACE"] # noqa: E501
    if self.local_vars_configuration.client_side_validation and http_method not in allowed_values: # noqa: E501
        raise ValueError(
            "Invalid value for `http_method` ({0}), must be one of {1}" # noqa: E501
            .format(http_method, allowed_values)
        )

    self._http_method = http_method

@property
def id(self):
    """Gets the id of this WebhookResultDto. # noqa: E501

    :return: The id of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._id

@id.setter
def id(self, id):
    """Sets the id of this WebhookResultDto.

    :param id: The id of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Optional field: no client-side validation.
    self._id = id

@property
def inbox_id(self):
    """Gets the inbox_id of this WebhookResultDto. # noqa: E501

    :return: The inbox_id of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._inbox_id

@inbox_id.setter
def inbox_id(self, inbox_id):
    """Sets the inbox_id of this WebhookResultDto.

    :param inbox_id: The inbox_id of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and inbox_id is None: # noqa: E501
        raise ValueError("Invalid value for `inbox_id`, must not be `None`") # noqa: E501

    self._inbox_id = inbox_id

@property
def message_id(self):
    """Gets the message_id of this WebhookResultDto. # noqa: E501

    :return: The message_id of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._message_id

@message_id.setter
def message_id(self, message_id):
    """Sets the message_id of this WebhookResultDto.

    :param message_id: The message_id of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and message_id is None: # noqa: E501
        raise ValueError("Invalid value for `message_id`, must not be `None`") # noqa: E501

    self._message_id = message_id

@property
def redrive_id(self):
    """Gets the redrive_id of this WebhookResultDto. # noqa: E501

    :return: The redrive_id of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._redrive_id

@redrive_id.setter
def redrive_id(self, redrive_id):
    """Sets the redrive_id of this WebhookResultDto.

    :param redrive_id: The redrive_id of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Optional field: no client-side validation.
    self._redrive_id = redrive_id
@property
def response_body_extract(self):
    """Gets the response_body_extract of this WebhookResultDto. # noqa: E501

    :return: The response_body_extract of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._response_body_extract

@response_body_extract.setter
def response_body_extract(self, response_body_extract):
    """Sets the response_body_extract of this WebhookResultDto.

    :param response_body_extract: The response_body_extract of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Optional field: no client-side validation.
    self._response_body_extract = response_body_extract

@property
def response_status(self):
    """Gets the response_status of this WebhookResultDto. # noqa: E501

    :return: The response_status of this WebhookResultDto. # noqa: E501
    :rtype: int
    """
    return self._response_status

@response_status.setter
def response_status(self, response_status):
    """Sets the response_status of this WebhookResultDto.

    :param response_status: The response_status of this WebhookResultDto. # noqa: E501
    :type: int
    """
    # Optional field: no client-side validation.
    self._response_status = response_status

@property
def response_time_millis(self):
    """Gets the response_time_millis of this WebhookResultDto. # noqa: E501

    :return: The response_time_millis of this WebhookResultDto. # noqa: E501
    :rtype: int
    """
    return self._response_time_millis

@response_time_millis.setter
def response_time_millis(self, response_time_millis):
    """Sets the response_time_millis of this WebhookResultDto.

    :param response_time_millis: The response_time_millis of this WebhookResultDto. # noqa: E501
    :type: int
    """
    # Required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and response_time_millis is None: # noqa: E501
        raise ValueError("Invalid value for `response_time_millis`, must not be `None`") # noqa: E501

    self._response_time_millis = response_time_millis

@property
def result_type(self):
    """Gets the result_type of this WebhookResultDto. # noqa: E501

    :return: The result_type of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._result_type

@result_type.setter
def result_type(self, result_type):
    """Sets the result_type of this WebhookResultDto.

    :param result_type: The result_type of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # NOTE(review): __init__ treats result_type as optional, but this
    # setter raises on None under client-side validation (None is not in
    # allowed_values) — confirm this matches the API contract.
    allowed_values = ["BAD_RESPONSE", "EXCEPTION", "SUCCESS"] # noqa: E501
    if self.local_vars_configuration.client_side_validation and result_type not in allowed_values: # noqa: E501
        raise ValueError(
            "Invalid value for `result_type` ({0}), must be one of {1}" # noqa: E501
            .format(result_type, allowed_values)
        )

    self._result_type = result_type

@property
def seen(self):
    """Gets the seen of this WebhookResultDto. # noqa: E501

    :return: The seen of this WebhookResultDto. # noqa: E501
    :rtype: bool
    """
    return self._seen

@seen.setter
def seen(self, seen):
    """Sets the seen of this WebhookResultDto.

    :param seen: The seen of this WebhookResultDto. # noqa: E501
    :type: bool
    """
    # Optional field: no client-side validation.
    self._seen = seen

@property
def updated_at(self):
    """Gets the updated_at of this WebhookResultDto. # noqa: E501

    :return: The updated_at of this WebhookResultDto. # noqa: E501
    :rtype: datetime
    """
    return self._updated_at

@updated_at.setter
def updated_at(self, updated_at):
    """Sets the updated_at of this WebhookResultDto.

    :param updated_at: The updated_at of this WebhookResultDto. # noqa: E501
    :type: datetime
    """
    # Required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501
        raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501

    self._updated_at = updated_at

@property
def user_id(self):
    """Gets the user_id of this WebhookResultDto. # noqa: E501

    :return: The user_id of this WebhookResultDto. # noqa: E501
    :rtype: str
    """
    return self._user_id

@user_id.setter
def user_id(self, user_id):
    """Sets the user_id of this WebhookResultDto.

    :param user_id: The user_id of this WebhookResultDto. # noqa: E501
    :type: str
    """
    # Required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and user_id is None: # noqa: E501
        raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501

    self._user_id = user_id
@property
def webhook_event(self):
"""Gets the webhook_event of this WebhookResultDto. # noqa: | |
#!/usr/bin/python
"""
Distributed id and lock service for transaction support.
"""
import kazoo.client
import kazoo.exceptions
import logging
import os
import re
import time
import urllib
from .inspectable_counter import InspectableCounter
from ..dbconstants import MAX_GROUPS_FOR_XG
from cassandra.policies import FallthroughRetryPolicy
from kazoo.exceptions import (KazooException,
ZookeeperError)
from kazoo.retry import KazooRetry
# A cassandra-driver policy that does not retry operations.
NO_RETRIES = FallthroughRetryPolicy()

class ZKTimeoutException(Exception):
    """ A special Exception class that should be thrown if a function is
    taking longer than expected by the caller to run
    """
    pass

# A list that indicates that the Zookeeper node to create should be readable
# and writable by anyone.
ZOO_ACL_OPEN = None

# The value that we should set for paths whose value we don't care about.
DEFAULT_VAL = "default"

# Paths are separated by this for the tree structure in zookeeper.
PATH_SEPARATOR = "/"

# This is the path which contains the different application's lock meta-data.
APPS_PATH = "/appscale/apps"

# This path contains different transaction IDs.
APP_TX_PATH = "txids"

# This is the node which holds all the locks of an application.
APP_LOCK_PATH = "locks"

APP_ID_PATH = "ids"

# Node-name prefixes for transaction, lock, and id sequence nodes.
APP_TX_PREFIX = "tx"
APP_LOCK_PREFIX = "lk"
APP_ID_PREFIX = "id"

# This is the prefix of all keys which have been updated within a transaction.
TX_UPDATEDKEY_PREFIX = "ukey"

# This is the name of the leaf. It holds a list of locks as a string.
TX_LOCK_PATH = "lockpath"

# The path for blacklisted transactions.
TX_BLACKLIST_PATH = "blacklist"

# This is the path name for valid versions of entities used in a transaction.
TX_VALIDLIST_PATH = "validlist"

# Paths used by the garbage collector: its lock and last-run timestamp.
GC_LOCK_PATH = "gclock"
GC_TIME_PATH = "gclast_time"

# Lock path for the datastore groomer.
DS_GROOM_LOCK_PATH = "/appscale_datastore_groomer"

# Lock path for the datastore backup.
DS_BACKUP_LOCK_PATH = "/appscale_datastore_backup"

# Lock path for the datastore restore.
DS_RESTORE_LOCK_PATH = "/appscale_datastore_restore"

# A unique prefix for cross group transactions.
XG_PREFIX = "xg"

# The separator value for the lock list when using XG transactions.
LOCK_LIST_SEPARATOR = "!XG_LIST!"

# The location of the ZooKeeper server script.
ZK_SERVER_CMD_LOCATIONS = [
    os.path.join('/usr', 'share', 'zookeeper', 'bin', 'zkServer.sh'),
    os.path.join('/usr', 'lib', 'zookeeper', 'bin', 'zkServer.sh')
]
class ZKTransactionException(Exception):
    """ ZKTransactionException defines a custom exception class that should be
    thrown whenever there was a problem involving a transaction (e.g., the
    transaction failed, we couldn't get a transaction ID).
    """
    pass

class ZKInternalException(Exception):
    """ ZKInternalException defines a custom exception class that should be
    thrown whenever we cannot connect to ZooKeeper for an extended amount of
    time.
    """
    pass

class ZKBadRequest(ZKTransactionException):
    """ A class thrown when there are too many locks acquired in an XG
    transaction or when XG operations are done on a non-XG transaction.
    """
    pass
class ZKTransaction:
""" ZKTransaction provides an interface that can be used to acquire locks
and other functions needed to perform database-agnostic transactions
(e.g., releasing locks, keeping track of transaction metadata).
"""
# How long to wait before retrying an operation.
ZK_RETRY_TIME = .5
def __init__(self, zk_client, db_access=None, log_level=logging.INFO):
""" Creates a new ZKTransaction, which will communicate with Zookeeper
on the given host.
Args:
zk_client: An instance of Zookeeper client.
db_access: A DatastoreProxy instance.
log_level: A logging constant that specifies the instance logging level.
"""
class_name = self.__class__.__name__
self.logger = logging.getLogger(class_name)
self.logger.setLevel(log_level)
self.logger.info('Starting {}'.format(class_name))
# Connection instance variables.
self.handle = zk_client
self.run_with_retry = self.handle.retry
self.__counter_cache = {}
self.db_access = db_access
def increment_and_get_counter(self, path, value):
    """ Atomically add `value` to the counter stored at `path`.

    Args:
        path: A str of unique path to the counter.
        value: An int of how much to increment the counter by.

    Returns:
        A tuple (int, int) of the previous value and the new value.

    Raises:
        ZKTransactionException: If it could not increment the counter.
    """
    # Create and cache one counter object per path, lazily.
    try:
        counter = self.__counter_cache[path]
    except KeyError:
        counter = InspectableCounter(self.handle, path)
        self.__counter_cache[path] = counter

    try:
        new_value = counter + value
    except kazoo.exceptions.ZookeeperError as zoo_exception:
        self.logger.exception(zoo_exception)
        raise ZKTransactionException("Couldn't increment path {0} by value {1}" \
            .format(path, value))
    except kazoo.exceptions.KazooException as kazoo_exception:
        self.logger.exception(kazoo_exception)
        raise ZKTransactionException(
            "Couldn't increment path {0} with value {1}" \
            .format(path, value))
    return new_value - value, new_value
def get_node(self, path, retries=5):
    """ Fetch the ZooKeeper node at the given path.

    Args:
        path: A PATH_SEPARATOR-separated str that represents the node whose
            value should be fetched.
        retries: The number of times to retry fetching the node.

    Returns:
        The result of the client's get() call (kazoo returns a
        (value, ZnodeStat) tuple), or False if the node does not exist.

    Raises:
        ZKInternalException: If there was an error trying to fetch the node.
    """
    try:
        return self.run_with_retry(self.handle.get, path)
    except kazoo.exceptions.NoNodeError:
        # A missing node is not an error for callers; signal it with False.
        return False
    except kazoo.exceptions.KazooException as kazoo_exception:
        # KazooException is the base of ZookeeperError, so this single
        # handler replaces two previously-duplicated blocks with
        # identical retry logic.
        self.logger.exception(kazoo_exception)
        if retries > 0:
            self.logger.info('Trying again to fetch node {} with retry #{}'
                .format(path, retries))
            time.sleep(self.ZK_RETRY_TIME)
            return self.get_node(path, retries=retries - 1)
        raise ZKInternalException('Unable to fetch node {}'.format(path))
def update_node(self, path, value):
    """ Sets the ZooKeeper node at path to value, creating the node if it
    doesn't exist.

    Args:
        path: A PATH_SEPARATOR-separated str that represents the node whose
            value should be updated.
        value: A str representing the value that should be associated with
            the updated node.
    """
    self.logger.debug(
        'Updating node at {}, with new value {}'.format(path, value))
    serialized = str(value)
    try:
        self.run_with_retry(self.handle.set, path, serialized)
    except kazoo.exceptions.NoNodeError:
        # The node is missing: create it (and any missing parents, via
        # makepath) with an open ACL instead of setting it.
        self.run_with_retry(self.handle.create, path, serialized,
            ZOO_ACL_OPEN, makepath=True)
def delete_recursive(self, path):
    """ Deletes the ZooKeeper node at path, and any child nodes it may have.

    Args:
        path: A PATH_SEPARATOR-separated str that represents the node to delete.
    """
    try:
        # Depth-first: remove each child's entire subtree, then the node.
        for child in self.run_with_retry(self.handle.get_children, path):
            self.delete_recursive(PATH_SEPARATOR.join([path, child]))
        self.run_with_retry(self.handle.delete, path)
    except kazoo.exceptions.NoNodeError:
        # Already gone; nothing to delete.
        pass
def get_app_root_path(self, app_id):
    """ Returns the ZooKeeper path that holds all information for the given
    application.

    Args:
        app_id: A str that represents the application we wish to get the root
            path for.

    Returns:
        A str that represents a ZooKeeper node, whose immediate children are
        the transaction prefix path and the locks prefix path.
    """
    # URL-quote the application id so it is safe to embed in a node name.
    quoted_app = urllib.quote_plus(app_id)
    return PATH_SEPARATOR.join([APPS_PATH, quoted_app])
def get_transaction_prefix_path(self, app_id):
    """ Returns the location of the ZooKeeper node who contains all
    transactions in progress for the given application.

    Args:
        app_id: A str that represents the application we wish to get all
            transaction information for.

    Returns:
        A str that represents a ZooKeeper node, whose immediate children are
        all of the transactions currently in progress.
    """
    app_root = self.get_app_root_path(app_id)
    return PATH_SEPARATOR.join([app_root, APP_TX_PATH])
def get_txn_path_before_getting_id(self, app_id):
    """ Returns a path that callers can use to get new transaction IDs from
    ZooKeeper, which are given as sequence nodes.

    Args:
        app_id: A str that represents the application we wish to build a new
            transaction path for.

    Returns: A str that can be used to create new transactions.
    """
    tx_prefix = self.get_transaction_prefix_path(app_id)
    return PATH_SEPARATOR.join([tx_prefix, APP_TX_PREFIX])
def get_transaction_path(self, app_id, txid):
    """ Returns the location of the ZooKeeper node who contains all
    information for a transaction, and is the parent of the transaction lock
    list and registered keys for the transaction.

    Args:
        app_id: A str that represents the application we wish to get the
            prefix path for.
        txid: An int that represents the transaction ID whose path we wish to
            acquire.
    """
    # Transaction nodes are named "tx" + the zero-padded 10-digit id.
    node_name = APP_TX_PREFIX + "%010d" % txid
    return PATH_SEPARATOR.join(
        [self.get_app_root_path(app_id), APP_TX_PATH, node_name])
def get_transaction_lock_list_path(self, app_id, txid):
    """ Returns the location of the ZooKeeper node whose value is a
    XG_LIST-separated str, representing all of the locks that have been
    acquired for the given transaction ID.

    Args:
        app_id: A str that represents the application we wish to get the
            transaction information about.
        txid: A str that represents the transaction ID we wish to get the
            lock list location for.

    Returns:
        A PATH_SEPARATOR-delimited str corresponding to the ZooKeeper node
        that contains the list of locks taken for the given transaction.
    """
    tx_path = self.get_transaction_path(app_id, txid)
    return PATH_SEPARATOR.join([tx_path, TX_LOCK_PATH])
def get_blacklist_root_path(self, app_id):
    """ Returns the location of the ZooKeeper node whose children are
    all of the blacklisted transaction IDs for the given application ID.

    Args:
      app_id: A str corresponding to the application who we want to get
        blacklisted transaction IDs for.
    Returns:
      A str corresponding to the ZooKeeper node whose children are
      blacklisted transaction IDs.
    """
    prefix = self.get_transaction_prefix_path(app_id)
    return PATH_SEPARATOR.join([prefix, TX_BLACKLIST_PATH])
def get_valid_transaction_root_path(self, app_id):
""" Returns the location of the ZooKeeper node whose children are
all of the valid transaction IDs for the given application ID.
Args:
app_id: A str corresponding to the application who we want to get
valid transaction IDs for.
Returns:
A str corresponding to | |
# Set flag to tell trigger start that it has to recover its
# trigger headers.
self.triggerProtected = True
def _restoreTrigger(self):
    """Restore the trigger headers saved by the matching protect call.

    Copies the kept trigger and goggle headers back into the active
    ``_HEADand*`` attributes and clears ``triggerProtected``.
    """
    # If triggers are running they will already have been recovered where
    # required, so only restore when they are not.
    if not self.trigger:
        restore_pairs = (
            ('_HEADandTrig', '_keepTrig'),
            ('_HEADandGogLeftOpen', '_keepGogLeftOpen'),
            ('_HEADandGogRightOpen', '_keepGogRightOpen'),
            ('_HEADandGogBothOpen', '_keepGogBothOpen'),
            ('_HEADandGogBothClosed', '_keepGogBothClosed'),
        )
        for active, kept in restore_pairs:
            setattr(self, active, deepcopy(getattr(self, kept)))
        self._HEADandTrigStr = self._HEADandTrig.tostring()
    # Tell trigger start that it no longer needs to recover its headers.
    self.triggerProtected = False
def _drawLUTtoScreen(self):
    """(private) Used to set the LUT in 'bits++' mode.

    Should not be needed by user if attached to a
    :class:`psychopy.visual.Window` since this will automatically
    draw the LUT as part of the screen refresh.
    """
    # push the projection matrix and set to orthographic
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glPushMatrix()
    GL.glLoadIdentity()
    # this also sets the 0,0 to be top-left
    GL.glOrtho(0, self.win.size[0], self.win.size[1], 0, 0, 1)
    # but return to modelview for rendering
    GL.glMatrixMode(GL.GL_MODELVIEW)
    GL.glLoadIdentity()
    # draw the pixels; first unbind any texture on units 0 and 1 so the
    # raw LUT pixel data is drawn unmodified
    GL.glActiveTexture(GL.GL_TEXTURE0)
    GL.glEnable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glActiveTexture(GL.GL_TEXTURE1)
    GL.glEnable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    # the LUT packet is drawn at raster position (0, 1)
    GL.glRasterPos2i(0, 1)
    GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
    GL.glDrawPixels(len(self._HEADandLUT), 1,
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                    self._HEADandLUTstr)
    # GL.glDrawPixels(524,1, GL.GL_RGB,GL.GL_UNSIGNED_BYTE,
    # self._HEADandLUTstr)
    # return to 3D mode (go and pop the projection matrix)
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glPopMatrix()
    GL.glMatrixMode(GL.GL_MODELVIEW)
def _ResetClock(self):
    """(private) Draw the code that resets the Bits hardware clock.

    Should not be needed by user if attached to a
    :class:`psychopy.visual.Window` since this will automatically draw
    the reset code as part of the screen refresh.
    """
    # Push the projection matrix and switch to an orthographic projection
    # with 0,0 at the top-left of the window.
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glPushMatrix()
    GL.glLoadIdentity()
    GL.glOrtho(0, self.win.size[0], self.win.size[1], 0, 0, 1)
    # Return to modelview for rendering.
    GL.glMatrixMode(GL.GL_MODELVIEW)
    GL.glLoadIdentity()
    # Unbind any texture on units 0 and 1 so the raw pixels are drawn
    # unmodified.
    GL.glActiveTexture(GL.GL_TEXTURE0)
    GL.glEnable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glActiveTexture(GL.GL_TEXTURE1)
    GL.glEnable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    # Draw the clock-reset packet at raster position (0, 2).
    GL.glRasterPos2i(0, 2)
    GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
    GL.glDrawPixels(len(self._HEADandClock), 1,
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                    self._HEADandClockstr)
    # Return to 3D mode (pop the projection matrix).
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glPopMatrix()
    GL.glMatrixMode(GL.GL_MODELVIEW)
    # Ensure that only one clock-reset pulse is issued at a time.
    self.clockReset = False
def _drawTrigtoScreen(self, sendStr=None):
    """(private) Used to send a trigger pulse.

    Should not be needed by user if attached to a
    :class:`psychopy.visual.Window` since this will automatically draw
    the trigger code as part of the screen refresh.

    :param sendStr: raw pixel data for the trigger packet; defaults to
        the pre-built ``self._HEADandTrigStr``.
    """
    # Identity test with `is None` (was `== None`, which is non-idiomatic
    # and can misbehave with objects overriding __eq__).
    if sendStr is None:
        sendStr = self._HEADandTrigStr
    # Push the projection matrix and switch to an orthographic projection
    # with 0,0 at the top-left of the window.
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glPushMatrix()
    GL.glLoadIdentity()
    GL.glOrtho(0, self.win.size[0], self.win.size[1], 0, 0, 1)
    # Return to modelview for rendering.
    GL.glMatrixMode(GL.GL_MODELVIEW)
    GL.glLoadIdentity()
    # Unbind any texture on units 0 and 1 so the raw pixels are drawn
    # unmodified.
    GL.glActiveTexture(GL.GL_TEXTURE0)
    GL.glEnable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glActiveTexture(GL.GL_TEXTURE1)
    GL.glEnable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    # Draw the trigger packet at raster position (0, 3).
    GL.glRasterPos2i(0, 3)
    GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
    GL.glDrawPixels(len(self._HEADandTrig), 1,
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                    sendStr)
    # Return to 3D mode (pop the projection matrix).
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glPopMatrix()
    GL.glMatrixMode(GL.GL_MODELVIEW)
def _Goggles(self):
    """(private) Used to control the goggles.

    Should not be needed by user if attached to a
    :class:`psychopy.visual.Window`.
    """
    # Encode the current state: bit 0 = left lens, bit 1 = right lens.
    state = self.gogglesRight * 2 + self.gogglesLeft
    # Toggle both lens states ready for the next window flip.
    self.gogglesLeft = 1 - self.gogglesLeft
    self.gogglesRight = 1 - self.gogglesRight
    # Map each state value onto the trigger header that encodes it, then
    # load that header into the TLock and draw the trigger.
    state_headers = {
        0: self._HEADandGogBothOpen,
        1: self._HEADandGogLeftOpen,
        2: self._HEADandGogRightOpen,
        3: self._HEADandGogBothClosed,
    }
    header = state_headers.get(state)
    if header is not None:
        self._drawTrigtoScreen(header.tostring())
def _setupShaders(self):
    """Compile and cache the shader programs needed for mono++ and
    color++ modes.
    """
    if not haveShaders:
        return
    # Map mode name -> compiled shader program.
    self._shaders = {
        'mono++': shaders.compileProgram(shaders.vertSimple,
                                         shaders.bitsMonoModeFrag),
        'color++': shaders.compileProgram(shaders.vertSimple,
                                          shaders.bitsColorModeFrag),
    }
def _prepareFBOrender(self):
    """Select the shader used to render the FBO for the current mode."""
    mode = self.mode
    # mono++ and color++ use dedicated shaders; any other mode falls back
    # to the window's standard FBO-to-frame program.
    if mode in ('mono++', 'color++'):
        GL.glUseProgram(self._shaders[mode])
    else:
        GL.glUseProgram(self.win._progFBOtoFrame)
def _finishFBOrender(self):
    """Deselect the mode-specific shader after the FBO has been rendered."""
    GL.glUseProgram(0)
def _afterFBOrender(self):
    """Draw any pending packets (LUT, goggle/trigger, clock reset) after
    the FBO has been rendered; blending is disabled while drawing them.
    """
    GL.glDisable(GL.GL_BLEND)
    # bits++ modes draw their LUT every frame.
    if self.mode.startswith('bits'):
        self._drawLUTtoScreen()
    if self.gogglesGo:  # Will also send triggers if started
        self._Goggles()
    elif self.trigger:
        self._drawTrigtoScreen()
    if self.clockReset:
        self._ResetClock()
    GL.glEnable(GL.GL_BLEND)
class BitsSharp(BitsPlusPlus, serialdevice.SerialDevice):
"""A class to support functions of the Bits# (and most Display++ functions
This device uses the CDC (serial port) connection to the Bits box.
To use it you must have followed the instructions from CRS Ltd. to get
your box into the CDC communication mode.
Typical usage (also see demo in Coder view demos>hardware>BitsBox )::
from psychopy import visual
from psychopy.hardware import crs
# we need to be rendering to framebuffer
win = visual.Window([1024,768], useFBO=True)
bits = crs.BitsSharp(win, mode = 'mono++')
# You can continue using your window as normal and OpenGL shaders
# will convert the output as needed
print(bits.info)
if not bits.OK:
print('failed to connect to Bits box')
core.quit()
core.wait(0.1)
# now, you can change modes using
bits.mode = 'mono++' # 'color++', 'mono++', 'bits++', 'status'
Note that the firmware in Bits# boxes varies over time and some features of
this class may not work for all firmware versions. Also Bits# boxes can be
configured in various ways via their config.xml file so this class makes certain
assumptions about the configuration. In particular it is assumed that all
digital inputs, triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
RTBox commands that reset the key mapping have been found not to work
one some firmware.
"""
name = b'CRS Bits#'
def __init__(self, win=None,
portName=None,
mode='',
checkConfigLevel=1,
gammaCorrect='hardware',
gamma=None,
noComms=False):
"""
:Parameters:
win : a PsychoPy :class:`~psychopy.visual.Window` object, required
portName : the (virtual) serial port to which the device is
connected. If None then PsychoPy will search available
serial ports and test communication (on OSX, the first
match of `/dev/tty.usbmodemfa*` will be used and on
linux `/dev/ttyS0` will be used
mode : 'bits++', 'color++', 'mono++', 'status'
checkConfigLevel : integer
Allows you to specify how much checking of the device is
done to ensure a valid identity look-up table. If you specify
one level and it fails then the check will be escalated to
the next level (e.g. if we check level 1 and find that it
fails we try to find a new LUT):
- 0 don't check at all
- 1 check that the graphics driver and OS version haven't
changed since last LUT calibration
- 2 check that the current LUT calibration still provides
identity (requires switch to status mode)
- 3 search for a new identity look-up table (requires
switch to status mode)
gammaCorrect : string governing how gamma correction is performed
'hardware': use the gamma correction file stored on the
hardware
'FBO': gamma correct using shaders when rendering the FBO
to back buffer
'bitsMode': in bits++ mode there is a user-controlled LUT
that we can use for gamma correction
noComms : bool
If True then don't try to communicate with the device at all
(passive mode). This can be useful if you want to debug the
system without actually having a Bits# connected.
"""
# import pyglet.GL late so that we can import bits.py without it
# initially
global GL, visual
from psychopy import visual
import pyglet.gl as GL
if noComms:
self.noComms = True
self.OK = True
self.sendMessage = self._nullSendMessage
self.getResponse = self._nullGetResponse
else:
self.noComms = False
# look for device on valid serial ports
# parity="N", # 'N'one, 'E'ven, 'O'dd, 'M'ask,
serialdevice.SerialDevice.__init__(self, port=portName,
baudrate=19200,
byteSize=8, stopBits=1,
parity="N",
eol='\n',
maxAttempts=1,
pauseDuration=0.1,
checkAwake=True)
if not self.OK:
return
self.win = win
if self.noComms:
self.frameRate = self.win.getActualFrameRate()
else:
msg='a'
while msg:
msg=self.read(timeout=0.1)
self.sendMessage('$VideoFrameRate\r')
self.pause()
msg=self.read(timeout=0.1)
msg2 = msg.split(b';')
self.frameRate = float(msg2[1])
self._setHeaders(self.frameRate)
# flag for controlling analog outputs
self.analog = False
# replace window methods with our custom ones
self.win._prepareFBOrender = self._prepareFBOrender
self.win._finishFBOrender = self._finishFBOrender
self.win._afterFBOrender = self._afterFBOrender
# Bits++ doesn't do its own correction so we need to
self.gammaCorrect = gammaCorrect
self.gamma = gamma
# we have a confirmed connection. Now check details about device and
# system
if not hasattr(self, 'info'):
self.info = self.getInfo()
self.config = None
self.mode = mode
if self.win is not None:
if not hasattr(self.win, '_prepareFBOrender'):
logging.error("BitsSharp was given | |
<reponame>HSF/iDDS
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2020 - 2021
# - <NAME>, <<EMAIL>>, 2020
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import datetime
import os
import traceback
from idds.common import exceptions
from idds.common.constants import (TransformType, CollectionStatus, CollectionType,
ContentStatus, ContentType,
ProcessingStatus, WorkStatus)
from idds.workflow.work import Work, Processing
from idds.workflow.workflow import Condition
class DomaCondition(Condition):
    """Workflow condition for DOMA work; a thin wrapper that forwards all
    arguments to the generic workflow :class:`Condition`.
    """
    def __init__(self, cond=None, current_work=None, true_work=None, false_work=None):
        super(DomaCondition, self).__init__(cond=cond, current_work=current_work,
                                            true_work=true_work, false_work=false_work)
class DomaPanDAWork(Work):
def __init__(self, executable=None, arguments=None, parameters=None, setup=None,
             work_tag='lsst', exec_type='panda', sandbox=None, work_id=None,
             primary_input_collection=None, other_input_collections=None,
             output_collections=None, log_collections=None,
             logger=None, dependency_map=None, task_name="",
             task_queue=None, processing_type=None,
             prodSourceLabel='test', task_type='test',
             maxwalltime=90000, maxattempt=5, core_count=1,
             encode_command_line=False,
             num_retries=5,
             task_log=None,
             task_cloud=None,
             task_rss=0):
    """Initialize a PanDA-backed DOMA work item.

    Collection/logging arguments are forwarded to the base ``Work``; the
    PanDA task settings (task_name, task_queue, processing_type,
    prodSourceLabel, task_type, walltime/attempt limits, core count,
    logging, cloud and RSS) are stored on the instance and later used by
    ``create_processing`` to build the task parameter map.

    :param dependency_map: list of job dicts; each has a 'name' and a
        'dependencies' list of {'task', 'inputname'} entries (as consumed
        by ``get_new_input_output_maps``).
    :param task_rss: requested RSS in MB; 0 means no explicit request.
    """
    super(DomaPanDAWork, self).__init__(executable=executable, arguments=arguments,
                                        parameters=parameters, setup=setup, work_type=TransformType.Processing,
                                        work_tag=work_tag, exec_type=exec_type, sandbox=sandbox, work_id=work_id,
                                        primary_input_collection=primary_input_collection,
                                        other_input_collections=other_input_collections,
                                        output_collections=output_collections,
                                        log_collections=log_collections,
                                        release_inputs_after_submitting=True,
                                        logger=logger)
    # PanDA endpoints; resolved by load_panda_urls() below.
    # self.pandamonitor = None
    self.panda_url = None
    self.panda_url_ssl = None
    self.panda_monitor = None
    self.dependency_map = dependency_map
    self.dependency_map_deleted = []
    # self.logger.setLevel(logging.DEBUG)
    self.task_name = task_name
    self.set_work_name(task_name)
    self.queue = task_queue
    self.dep_tasks_id_names_map = {}
    self.executable = executable
    self.processingType = processing_type
    self.prodSourceLabel = prodSourceLabel
    self.task_type = task_type
    self.maxWalltime = maxwalltime
    self.maxAttempt = maxattempt
    self.core_count = core_count
    self.task_log = task_log
    self.encode_command_line = encode_command_line
    self.task_cloud = task_cloud
    self.task_rss = task_rss
    self.retry_number = 0
    self.num_retries = num_retries
    # May be overridden by agent attributes in set_agent_attributes().
    self.poll_panda_jobs_chunk_size = 2000
    self.load_panda_urls()
def my_condition(self):
    """Condition helper: True once this work has finished."""
    return bool(self.is_finished())
def load_panda_config(self):
    """Load the PanDA configuration file.

    Search order:
      1. the file named by the ``IDDS_PANDA_CONFIG`` environment variable,
      2. ``$IDDS_HOME/etc/panda/panda.cfg``,
      3. ``/etc/panda/panda.cfg``,
      4. ``/opt/idds/etc/panda/panda.cfg``,
      5. ``$VIRTUAL_ENV/etc/panda/panda.cfg``.

    Returns:
        A ConfigParser instance loaded from the first readable candidate;
        empty when no candidate file could be read.
    """
    # SafeConfigParser was deprecated in Python 3.2 and removed in 3.12;
    # ConfigParser is the supported equivalent on both import paths.
    panda_config = ConfigParser.ConfigParser()
    configfiles = []
    if os.environ.get('IDDS_PANDA_CONFIG', None):
        configfiles.append(os.environ['IDDS_PANDA_CONFIG'])
    configfiles += ['%s/etc/panda/panda.cfg' % os.environ.get('IDDS_HOME', ''),
                    '/etc/panda/panda.cfg', '/opt/idds/etc/panda/panda.cfg',
                    '%s/etc/panda/panda.cfg' % os.environ.get('VIRTUAL_ENV', '')]
    for configfile in configfiles:
        # ConfigParser.read returns the list of files successfully parsed.
        if panda_config.read(configfile) == [configfile]:
            return panda_config
    return panda_config
def load_panda_urls(self):
    """Resolve panda_url, panda_url_ssl and panda_monitor from the PanDA
    config file, falling back to the corresponding environment variables.
    """
    panda_config = self.load_panda_config()
    self.panda_url = None
    self.panda_url_ssl = None
    self.panda_monitor = None
    # (attribute, config option, environment variable) triples.
    targets = (('panda_monitor', 'panda_monitor_url', 'PANDA_MONITOR_URL'),
               ('panda_url', 'panda_url', 'PANDA_URL'),
               ('panda_url_ssl', 'panda_url_ssl', 'PANDA_URL_SSL'))
    if panda_config.has_section('panda'):
        for attr, option, env_key in targets:
            if panda_config.has_option('panda', option):
                value = panda_config.get('panda', option)
                setattr(self, attr, value)
                # Export so the PanDA client libraries pick the value up.
                os.environ[env_key] = value
    # Fall back to any non-empty value already in the environment.
    for attr, option, env_key in targets:
        if not getattr(self, attr) and os.environ.get(env_key):
            setattr(self, attr, os.environ[env_key])
def set_agent_attributes(self, attrs, req_attributes=None):
    """Apply agent-level configuration to this work.

    :param attrs: dict of agent attributes keyed by class name.
    :param req_attributes: accepted for interface compatibility; unused here.
    """
    # NOTE(review): the check reads attrs[self.class_name]['life_time'] but
    # the reset writes attrs['life_time'] at the top level -- confirm this
    # asymmetry is intentional.
    if 'life_time' not in attrs[self.class_name] or int(attrs[self.class_name]['life_time']) <= 0:
        attrs['life_time'] = None
    super(DomaPanDAWork, self).set_agent_attributes(attrs)
    # Override instance defaults with any non-empty agent-provided values.
    if 'num_retries' in self.agent_attributes and self.agent_attributes['num_retries']:
        self.num_retries = int(self.agent_attributes['num_retries'])
    if 'poll_panda_jobs_chunk_size' in self.agent_attributes and self.agent_attributes['poll_panda_jobs_chunk_size']:
        self.poll_panda_jobs_chunk_size = int(self.agent_attributes['poll_panda_jobs_chunk_size'])
def depend_on(self, work):
    """Return True if any job in this work's dependency map depends on
    *work*, matched by task name.
    """
    return any(dep['task'] == work.task_name
               for job in self.dependency_map
               for dep in job["dependencies"])
def poll_external_collection(self, coll):
    """Refresh the status and bookkeeping metadata of an external input
    collection.

    A closed collection is returned unchanged. Otherwise placeholder
    metadata is filled in and the collection is marked Open, or Closed
    once this work has no new inputs.

    :raises exceptions.IDDSException: on any unexpected error.
    """
    try:
        if coll.status in [CollectionStatus.Closed]:
            return coll
        # Placeholder bookkeeping values (no real external catalogue here).
        coll.coll_metadata.update({'bytes': 1,
                                   'availability': 1,
                                   'events': 1,
                                   'is_open': True,
                                   'run_number': 1,
                                   'did_type': 'DATASET',
                                   'list_all_files': False})
        # if (not self.dependency_map_deleted and not self.dependency_map):
        if not self.has_new_inputs:
            coll.coll_metadata['is_open'] = False
        if coll.coll_metadata.get('is_open'):
            coll.status = CollectionStatus.Open
        else:
            coll.status = CollectionStatus.Closed
        coll.coll_metadata['coll_type'] = CollectionType.Dataset
        return coll
    except Exception as ex:
        self.logger.error(ex)
        self.logger.error(traceback.format_exc())
        raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc()))
def get_input_collections(self):
    """
    *** Function called by Transformer agent.

    Poll every input collection (primary plus others), refresh its entry
    in self.collections, then delegate to the base implementation.
    """
    for coll_int_id in [self.primary_input_collection] + self.other_input_collections:
        refreshed = self.poll_external_collection(self.collections[coll_int_id])
        self.collections[coll_int_id] = refreshed
    return super(DomaPanDAWork, self).get_input_collections()
def get_mapped_inputs(self, mapped_input_output_maps):
    """Return the primary input content of every mapped input/output map.

    The primary input is the last entry whose content_metadata flags it
    as 'primary'; when none is flagged, the first input is used.
    """
    ret = []
    for io_map in mapped_input_output_maps.values():
        inputs = io_map['inputs']
        flagged = [ip for ip in inputs
                   if ip['content_metadata'].get('primary')]
        ret.append(flagged[-1] if flagged else inputs[0])
    return ret
def get_mapped_outputs(self, mapped_input_output_maps):
    """Return the primary output content of every mapped input/output map.

    The primary output is the last entry whose content_metadata flags it
    as 'primary'; when none is flagged, the first output is used.
    """
    ret = []
    for io_map in mapped_input_output_maps.values():
        outputs = io_map['outputs']
        flagged = [op for op in outputs
                   if op['content_metadata'].get('primary')]
        ret.append(flagged[-1] if flagged else outputs[0])
    return ret
def map_file_to_content(self, coll_id, scope, name):
    """Build the content dict describing one file of a collection.

    :param coll_id: internal id of the collection the content belongs to.
    :param scope: scope of the file.
    :param name: file name (may differ from the dataset name).
    :returns: a dict in the layout expected for File-type contents.
    """
    content = dict(coll_id=coll_id,
                   scope=scope,
                   name=name,  # or a different file name from the dataset name
                   bytes=1,             # placeholder size
                   adler32='12345678',  # placeholder checksum
                   min_id=0,
                   max_id=1,
                   content_type=ContentType.File,
                   # 'content_relation_type': content_relation_type,
                   # 'events' is only meaningful for eventservice; unused here.
                   content_metadata={'events': 1})
    return content
def is_all_dependency_tasks_available(self, inputs_dependency, task_name_to_coll_map):
    """Return True when every dependency task already has non-empty
    output collections registered in *task_name_to_coll_map*.
    """
    return all(dep['task'] in task_name_to_coll_map
               and task_name_to_coll_map[dep['task']].get('outputs')
               for dep in inputs_dependency)
def get_unmapped_jobs(self, mapped_input_output_maps={}):
    """Return dependency-map jobs whose output name is not yet mapped.

    :param mapped_input_output_maps: dict of already-mapped input/output
        maps. (Read-only here; the mutable default is never modified and
        is kept for signature compatibility with the sibling methods.)
    :returns: list of jobs from self.dependency_map with no mapped output.
    """
    mapped_outputs = self.get_mapped_outputs(mapped_input_output_maps)
    # Use a set for O(1) membership tests instead of scanning a list once
    # per job in the dependency map.
    mapped_output_names = {op['name'] for op in mapped_outputs}
    return [job for job in self.dependency_map
            if job['name'] not in mapped_output_names]
def get_new_input_output_maps(self, mapped_input_output_maps={}):
    """
    *** Function called by Transformer agent.
    New inputs which are not yet mapped to outputs.

    For every unmapped job whose dependency tasks all have outputs
    available, builds an entry {map_id: {'inputs', 'outputs', 'logs',
    'inputs_dependency'}} keyed by the next free map id.

    :param mapped_input_output_maps: Inputs that are already mapped.
    """
    new_input_output_maps = {}
    unmapped_jobs = self.get_unmapped_jobs(mapped_input_output_maps)
    if not unmapped_jobs:
        # Everything is mapped; tell the transformer there is nothing new.
        self.set_has_new_inputs(False)
        return new_input_output_maps
    if unmapped_jobs:
        # Continue map ids after the highest already assigned.
        mapped_keys = mapped_input_output_maps.keys()
        if mapped_keys:
            next_key = max(mapped_keys) + 1
        else:
            next_key = 1
        input_coll = self.get_input_collections()[0]
        input_coll_id = input_coll.coll_id
        output_coll = self.get_output_collections()[0]
        output_coll_id = output_coll.coll_id
        task_name_to_coll_map = self.get_work_name_to_coll_map()
        for job in unmapped_jobs:
            output_name = job['name']
            inputs_dependency = job["dependencies"]
            # Only map a job once all of its dependency tasks have outputs.
            if self.is_all_dependency_tasks_available(inputs_dependency, task_name_to_coll_map):
                input_content = self.map_file_to_content(input_coll_id, input_coll.scope, output_name)
                output_content = self.map_file_to_content(output_coll_id, output_coll.scope, output_name)
                new_input_output_maps[next_key] = {'inputs_dependency': [],
                                                   'logs': [],
                                                   'inputs': [input_content],
                                                   'outputs': [output_content]}
                # Record one dependency content per dependency input, taken
                # from the first output collection of the dependency task.
                for input_d in inputs_dependency:
                    task_name = input_d['task']
                    input_name = input_d['inputname']
                    input_d_coll = task_name_to_coll_map[task_name]['outputs'][0]
                    input_d_content = self.map_file_to_content(input_d_coll['coll_id'], input_d_coll['scope'], input_name)
                    new_input_output_maps[next_key]['inputs_dependency'].append(input_d_content)
                # all inputs are parsed. move it to dependency_map_deleted
                # self.dependency_map_deleted.append(job)
                next_key += 1
            else:
                # not all inputs for this job can be parsed.
                # self.dependency_map.append(job)
                pass
    # self.logger.debug("get_new_input_output_maps, new_input_output_maps: %s" % str(new_input_output_maps))
    self.logger.debug("get_new_input_output_maps, new_input_output_maps len: %s" % len(new_input_output_maps))
    return new_input_output_maps
def use_dependency_to_release_jobs(self):
    """
    *** Function called by Transformer agent.

    Always True for this work type: signals that job release is driven
    by the dependency map.
    """
    return True
def get_processing(self, input_output_maps=[], without_creating=False):
    """
    *** Function called by Transformer agent.

    Return the active processing for this work. If none exists, a new one
    is created via create_processing unless *without_creating* is set.
    """
    if self.active_processings:
        return self.processings[self.active_processings[0]]
    if without_creating:
        return None
    return self.create_processing(input_output_maps)
def create_processing(self, input_output_maps=[]):
    """
    *** Function called by Transformer agent.

    Build the PanDA task parameter map from this work's settings and
    dependency map, wrap it in a Processing record, and register that
    record as the active processing.

    :param input_output_maps: new maps from inputs to outputs.
    :returns: the newly created Processing.
    """
    # avoid duplicated task name
    # NOTE(review): this mutates self.task_name, so calling the method twice
    # would append the request/work-id suffix twice -- confirm single-call use.
    self.task_name = self.task_name + "_" + str(self.get_request_id()) + "_" + str(self.get_work_id())
    # Every dependency-map job name becomes a pseudo input file.
    in_files = []
    for job in self.dependency_map:
        in_files.append(job['name'])
    task_param_map = {}
    task_param_map['vo'] = 'wlcg'
    if self.queue and len(self.queue) > 0:
        task_param_map['site'] = self.queue
    task_param_map['workingGroup'] = 'lsst'
    task_param_map['nFilesPerJob'] = 1
    task_param_map['nFiles'] = len(in_files)
    # No real input dataset; the pseudo inputs are passed via pfnList.
    task_param_map['noInput'] = True
    task_param_map['pfnList'] = in_files
    task_param_map['taskName'] = self.task_name
    task_param_map['userName'] = 'iDDS'
    task_param_map['taskPriority'] = 900
    task_param_map['architecture'] = ''
    task_param_map['transUses'] = ''
    task_param_map['transHome'] = None
    # Transformation wrapper: encoded vs plain bash -c runner.
    if self.encode_command_line:
        # task_param_map['transPath'] = 'https://atlpan.web.cern.ch/atlpan/bash-c-enc'
        task_param_map['transPath'] = 'https://storage.googleapis.com/drp-us-central1-containers/bash-c-enc'
        task_param_map['encJobParams'] = True
    else:
        # task_param_map['transPath'] = 'https://atlpan.web.cern.ch/atlpan/bash-c'
        task_param_map['transPath'] = 'https://storage.googleapis.com/drp-us-central1-containers/bash-c'
    task_param_map['processingType'] = self.processingType
    task_param_map['prodSourceLabel'] = self.prodSourceLabel
    task_param_map['taskType'] = self.task_type
    task_param_map['coreCount'] = self.core_count
    task_param_map['skipScout'] = True
    task_param_map['cloud'] = self.task_cloud
    if self.task_rss and self.task_rss > 0:
        task_param_map['ramCount'] = self.task_rss
        task_param_map['ramUnit'] = 'MB'
    task_param_map['inputPreStaging'] = True
    task_param_map['prestagingRuleID'] = 123
    task_param_map['nChunksToWait'] = 1
    task_param_map['maxCpuCount'] = self.maxWalltime
    task_param_map['maxWalltime'] = self.maxWalltime
    task_param_map['maxFailure'] = self.maxAttempt
    task_param_map['maxAttempt'] = self.maxAttempt
    task_param_map['log'] = self.task_log
    task_param_map['jobParameters'] = [
        {'type': 'constant',
         'value': self.executable,  # noqa: E501
         },
    ]
    task_param_map['reqID'] = self.get_request_id()
    processing_metadata = {'task_param': task_param_map}
    proc = Processing(processing_metadata=processing_metadata)
    # workload_id is filled in at submission time (the PanDA task id).
    proc.workload_id = None
    self.add_processing_to_processings(proc)
    self.active_processings.append(proc.internal_id)
    return proc
def submit_panda_task(self, processing):
    """Submit the task parameters of *processing* to PanDA.

    :param processing: processing record whose metadata carries the
        'task_param' map built by create_processing.
    :returns: the new PanDA task id on success; None on failure (errors
        are logged, not raised).
    """
    try:
        # Imported lazily so this module can be loaded without the PanDA
        # client installed.
        from pandatools import Client
        proc = processing['processing_metadata']['processing']
        task_param = proc.processing_metadata['task_param']
        return_code = Client.insertTaskParams(task_param, verbose=False)
        if return_code[0] == 0:
            return return_code[1][1]
        else:
            # logger.warn is a deprecated alias of logger.warning.
            self.logger.warning("submit_panda_task, return_code: %s" % str(return_code))
    except Exception as ex:
        self.logger.error(ex)
        self.logger.error(traceback.format_exc())
        # raise exceptions.AgentPluginError('%s: %s' % (str(ex), traceback.format_exc()))
    return None
def submit_processing(self, processing):
"""
*** Function called by Carrier agent.
"""
proc = processing['processing_metadata']['processing']
if proc.workload_id:
# if 'task_id' in processing['processing_metadata'] and processing['processing_metadata']['task_id']:
pass
else:
task_id = self.submit_panda_task(processing)
# processing['processing_metadata']['task_id'] = task_id
# processing['processing_metadata']['workload_id'] = task_id
proc.workload_id = task_id
| |
import os
import shutil
import numpy as np
from collections import namedtuple
import glob
import time
import datetime
import pickle
import torch
import matplotlib.pyplot as plt
from termcolor import cprint
from navpy import lla2ned
from collections import OrderedDict
from dataset import BaseDataset
from utils_torch_filter import TORCHIEKF
from utils_numpy_filter import NUMPYIEKF as IEKF
from utils import prepare_data
from train_torch_filter import train_filter
from utils_plot import results_filter
def launch(args):
    """Run the pipeline stages selected on *args*.

    Stages, each gated by a boolean flag on args: read_data (import raw
    data via the dataset class), train_filter, test_filter and
    results_filter; all later stages share one dataset instance.
    """
    if args.read_data:
        args.dataset_class.read_data(args)
    dataset = args.dataset_class(args)

    if args.train_filter:
        train_filter(args, dataset)

    if args.test_filter:
        test_filter(args, dataset)

    if args.results_filter:
        results_filter(args, dataset)
class KITTIParameters(IEKF.Parameters):
    """IEKF parameter set tuned for the KITTI dataset.

    All cov_* values are forwarded to the base IEKF.Parameters; instances
    copy the class-level values onto themselves via set_param_attr().
    """
    # gravity vector
    # NOTE(review): standard gravity is 9.80665 m/s^2; the value below reads
    # 9.80655 -- confirm whether this is intentional.
    g = np.array([0, 0, -9.80655])

    # cov_* parameters consumed by the filter (presumably process /
    # initial-state / measurement covariances, judging by the names --
    # confirm against IEKF.Parameters).
    cov_omega = 2e-4
    cov_acc = 1e-3
    cov_b_omega = 1e-8
    cov_b_acc = 1e-6
    cov_Rot_c_i = 1e-8
    cov_t_c_i = 1e-8
    cov_Rot0 = 1e-6
    cov_v0 = 1e-1
    cov_b_omega0 = 1e-8
    cov_b_acc0 = 1e-3
    cov_Rot_c_i0 = 1e-5
    cov_t_c_i0 = 1e-2
    cov_lat = 1
    cov_up = 10

    def __init__(self, **kwargs):
        super(KITTIParameters, self).__init__(**kwargs)
        self.set_param_attr()

    def set_param_attr(self):
        # Copy every non-callable, non-dunder class attribute onto the
        # instance so instances can be tweaked without changing the class
        # defaults.
        attr_list = [a for a in dir(KITTIParameters) if
                     not a.startswith('__') and not callable(getattr(KITTIParameters, a))]
        for attr in attr_list:
            setattr(self, attr, getattr(KITTIParameters, attr))
class KITTIDataset(BaseDataset):
# Raw OXTS GPS/IMU record, field order as in the KITTI oxts text files.
OxtsPacket = namedtuple('OxtsPacket',
                        'lat, lon, alt, '
                        'roll, pitch, yaw, '
                        'vn, ve, vf, vl, vu, '
                        'ax, ay, az, af, al, au, '
                        'wx, wy, wz, wf, wl, wu, '
                        'pos_accuracy, vel_accuracy, '
                        'navstat, numsats, '
                        'posmode, velmode, orimode')

# Bundle into an easy-to-access structure
OxtsData = namedtuple('OxtsData', 'packet, T_w_imu')

# Minimum sequence length accepted by read_data, in samples.
# NOTE(review): 25 * 100 is 2500 samples (~25 s at 100 Hz), but the original
# comment said "60 s" and read_data's rejection message mentions 30 s --
# confirm the intended minimum.
min_seq_dim = 25 * 100

# Sequences excluded from use:
# '2011_09_30_drive_0028_extract' has trouble at N = [6000, 14000] -> test data
# '2011_10_03_drive_0027_extract' has trouble at N = 29481
# '2011_10_03_drive_0034_extract' has trouble at N = [33500, 34000]
datasets_fake = ['2011_09_26_drive_0093_extract', '2011_09_28_drive_0039_extract',
                 '2011_09_28_drive_0002_extract']

# training set to the raw data of the KITTI dataset.
# The following dict lists the name and end frame of each sequence that
# has been used to extract the visual odometry / SLAM training set
odometry_benchmark = OrderedDict([
    ("2011_10_03_drive_0027_extract", [0, 45692]),
    ("2011_10_03_drive_0042_extract", [0, 12180]),
    ("2011_10_03_drive_0034_extract", [0, 47935]),
    ("2011_09_26_drive_0067_extract", [0, 8000]),
    ("2011_09_30_drive_0016_extract", [0, 2950]),
    ("2011_09_30_drive_0018_extract", [0, 28659]),
    ("2011_09_30_drive_0020_extract", [0, 11347]),
    ("2011_09_30_drive_0027_extract", [0, 11545]),
    ("2011_09_30_drive_0028_extract", [11231, 53650]),
    ("2011_09_30_drive_0033_extract", [0, 16589]),
    ("2011_09_30_drive_0034_extract", [0, 12744]),
])

# Same sequences restricted to the frame ranges with camera images.
odometry_benchmark_img = OrderedDict([
    ("2011_10_03_drive_0027_extract", [0, 45400]),
    ("2011_10_03_drive_0042_extract", [0, 11000]),
    ("2011_10_03_drive_0034_extract", [0, 46600]),
    ("2011_09_26_drive_0067_extract", [0, 8000]),
    ("2011_09_30_drive_0016_extract", [0, 2700]),
    ("2011_09_30_drive_0018_extract", [0, 27600]),
    ("2011_09_30_drive_0020_extract", [0, 11000]),
    ("2011_09_30_drive_0027_extract", [0, 11000]),
    ("2011_09_30_drive_0028_extract", [11000, 51700]),
    ("2011_09_30_drive_0033_extract", [0, 15900]),
    ("2011_09_30_drive_0034_extract", [0, 12000]),
])
def __init__(self, args):
    """Build the KITTI dataset wrapper and register the train/validation
    splits, dropping the known-bad sequences.
    """
    super(KITTIDataset, self).__init__(args)

    # Validation split.
    self.datasets_validatation_filter['2011_09_30_drive_0028_extract'] = [11231, 53650]

    # Training splits: sequence name -> [start, end] frame range
    # (None means "to the end of the sequence").
    train_ranges = [
        ("2011_10_03_drive_0042_extract", [0, None]),
        ("2011_09_30_drive_0018_extract", [0, 15000]),
        ("2011_09_30_drive_0020_extract", [0, None]),
        ("2011_09_30_drive_0027_extract", [0, None]),
        ("2011_09_30_drive_0033_extract", [0, None]),
        ("2011_10_03_drive_0027_extract", [0, 18000]),
        ("2011_10_03_drive_0034_extract", [0, 31000]),
        ("2011_09_30_drive_0034_extract", [0, None]),
    ]
    for seq_name, frame_range in train_ranges:
        self.datasets_train_filter[seq_name] = frame_range

    # Remove sequences known to be unusable from both listings.
    for fake_seq in KITTIDataset.datasets_fake:
        if fake_seq in self.datasets:
            self.datasets.remove(fake_seq)
        if fake_seq in self.datasets_train:
            self.datasets_train.remove(fake_seq)
@staticmethod
def read_data(args):
    """
    Read the raw KITTI "extract" sequences and dump per-sequence pickles.

    For each drive under ``args.path_data_base`` this parses the OXTS text
    files, converts timestamps to seconds, builds IMU inputs (gyro + accel)
    and ground-truth pose/velocity arrays, converts them to float torch
    tensors and saves them via ``KITTIDataset.dump`` into
    ``args.path_data_save``.

    :param args: must provide ``path_data_base`` (raw KITTI root) and
        ``path_data_save`` (output directory).
    :return: None (results are written to disk).
    """
    print("Start read_data")
    t_tot = 0  # sum of times for the all dataset
    date_dirs = os.listdir(args.path_data_base)
    for n_iter, date_dir in enumerate(date_dirs):
        # get access to each sequence
        path1 = os.path.join(args.path_data_base, date_dir)
        if not os.path.isdir(path1):
            continue
        date_dirs2 = os.listdir(path1)
        for date_dir2 in date_dirs2:
            path2 = os.path.join(path1, date_dir2)
            if not os.path.isdir(path2):
                continue
            # read data
            oxts_files = sorted(glob.glob(os.path.join(path2, 'oxts', 'data', '*.txt')))
            oxts = KITTIDataset.load_oxts_packets_and_poses(oxts_files)
            """ Note on difference between ground truth and oxts solution:
            - orientation is the same
            - north and east axis are inverted
            - position are closed to but different
            => oxts solution is not loaded
            """
            print("\n Sequence name : " + date_dir2)
            if len(oxts) < KITTIDataset.min_seq_dim:  # sequence shorter than 30 s are rejected
                # OXTS runs at 100 Hz, hence the /100 to get seconds
                cprint("Dataset is too short ({:.2f} s)".format(len(oxts) / 100), 'yellow')
                continue
            # Per-sample scalar channels.
            lat_oxts = np.zeros(len(oxts))
            lon_oxts = np.zeros(len(oxts))
            alt_oxts = np.zeros(len(oxts))
            roll_oxts = np.zeros(len(oxts))
            pitch_oxts = np.zeros(len(oxts))
            yaw_oxts = np.zeros(len(oxts))
            roll_gt = np.zeros(len(oxts))
            pitch_gt = np.zeros(len(oxts))
            yaw_gt = np.zeros(len(oxts))
            t = KITTIDataset.load_timestamps(path2)
            # IMU channels: body-frame (f/l/u) and sensor-frame (x/y/z) variants.
            acc = np.zeros((len(oxts), 3))
            acc_bis = np.zeros((len(oxts), 3))
            gyro = np.zeros((len(oxts), 3))
            gyro_bis = np.zeros((len(oxts), 3))
            p_gt = np.zeros((len(oxts), 3))
            v_gt = np.zeros((len(oxts), 3))
            v_rob_gt = np.zeros((len(oxts), 3))
            k_max = len(oxts)
            for k in range(k_max):
                oxts_k = oxts[k]
                # Convert datetime-of-day to seconds (fractional).
                t[k] = 3600 * t[k].hour + 60 * t[k].minute + t[k].second + t[
                    k].microsecond / 1e6
                lat_oxts[k] = oxts_k[0].lat
                lon_oxts[k] = oxts_k[0].lon
                alt_oxts[k] = oxts_k[0].alt
                acc[k, 0] = oxts_k[0].af
                acc[k, 1] = oxts_k[0].al
                acc[k, 2] = oxts_k[0].au
                acc_bis[k, 0] = oxts_k[0].ax
                acc_bis[k, 1] = oxts_k[0].ay
                acc_bis[k, 2] = oxts_k[0].az
                gyro[k, 0] = oxts_k[0].wf
                gyro[k, 1] = oxts_k[0].wl
                gyro[k, 2] = oxts_k[0].wu
                gyro_bis[k, 0] = oxts_k[0].wx
                gyro_bis[k, 1] = oxts_k[0].wy
                gyro_bis[k, 2] = oxts_k[0].wz
                roll_oxts[k] = oxts_k[0].roll
                pitch_oxts[k] = oxts_k[0].pitch
                yaw_oxts[k] = oxts_k[0].yaw
                v_gt[k, 0] = oxts_k[0].ve
                v_gt[k, 1] = oxts_k[0].vn
                v_gt[k, 2] = oxts_k[0].vu
                v_rob_gt[k, 0] = oxts_k[0].vf
                v_rob_gt[k, 1] = oxts_k[0].vl
                # NOTE(review): uses .vu (up) rather than .vu-equivalent of the
                # robot frame here — looks like it may have been meant to be a
                # third body-frame component; confirm against the OXTS format.
                v_rob_gt[k, 2] = oxts_k[0].vu
                # Ground-truth pose from the precomputed 4x4 transform.
                p_gt[k] = oxts_k[1][:3, 3]
                Rot_gt_k = oxts_k[1][:3, :3]
                roll_gt[k], pitch_gt[k], yaw_gt[k] = IEKF.to_rpy(Rot_gt_k)
            t0 = t[0]
            t = np.array(t) - t[0]
            # some data can have gps out
            # NOTE(review): this flags only backwards time jumps > 0.1 s;
            # forward gaps would need t[1:] - t[:-1] — confirm intent.
            if np.max(t[:-1] - t[1:]) > 0.1:
                cprint(date_dir2 + " has time problem", 'yellow')
            ang_gt = np.zeros((roll_gt.shape[0], 3))
            ang_gt[:, 0] = roll_gt
            ang_gt[:, 1] = pitch_gt
            ang_gt[:, 2] = yaw_gt
            # GPS positions in a local NED frame anchored at the first sample.
            p_oxts = lla2ned(lat_oxts, lon_oxts, alt_oxts, lat_oxts[0], lon_oxts[0],
                             alt_oxts[0], latlon_unit='deg', alt_unit='m', model='wgs84')
            p_oxts[:, [0, 1]] = p_oxts[:, [1, 0]]  # see note
            # take correct imu measurements
            u = np.concatenate((gyro_bis, acc_bis), -1)
            # convert from numpy
            t = torch.from_numpy(t)
            p_gt = torch.from_numpy(p_gt)
            v_gt = torch.from_numpy(v_gt)
            ang_gt = torch.from_numpy(ang_gt)
            u = torch.from_numpy(u)
            # convert to float
            t = t.float()
            u = u.float()
            p_gt = p_gt.float()
            ang_gt = ang_gt.float()
            v_gt = v_gt.float()
            mondict = {
                't': t, 'p_gt': p_gt, 'ang_gt': ang_gt, 'v_gt': v_gt,
                'u': u, 'name': date_dir2, 't0': t0
            }
            t_tot += t[-1] - t[0]
            KITTIDataset.dump(mondict, args.path_data_save, date_dir2)
    print("\n Total dataset duration : {:.2f} s".format(t_tot))
@staticmethod
def prune_unused_data(args):
    """
    Delete the image and velodyne folders of every drive to save disk space.

    Walks ``args.path_data_base/<date>/<drive>`` and removes any of the
    camera / velodyne subfolders found there. Destructive and irreversible.

    :param args: must provide ``path_data_base`` (raw KITTI root).
    :return: None
    """
    unused_list = ['image_00', 'image_01', 'image_02', 'image_03', 'velodyne_points']
    date_dirs = ['2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
    for day in date_dirs:
        day_path = os.path.join(args.path_data_base, day)
        if not os.path.isdir(day_path):
            continue
        for drive in os.listdir(day_path):
            drive_path = os.path.join(day_path, drive)
            if not os.path.isdir(drive_path):
                continue
            print(drive_path)
            for unused in unused_list:
                target = os.path.join(drive_path, unused)
                if os.path.isdir(target):
                    print(target)
                    shutil.rmtree(target)
@staticmethod
def subselect_files(files, indices):
try:
files = [files[i] for i in indices]
except:
pass
return files
@staticmethod
def rotx(t):
"""Rotation about the x-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
@staticmethod
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
@staticmethod
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
@staticmethod
def pose_from_oxts_packet(packet, scale):
    """Helper method to compute the pose from an OXTS packet.

    Returns the rotation matrix and translation vector separately (the
    caller assembles the homogeneous SE(3) transform).

    :param packet: one parsed OXTS record (lat/lon/alt, roll/pitch/yaw).
    :param scale: Mercator scale factor, cos(latitude of the first sample).
    :return: tuple ``(R, t)`` — 3x3 rotation and length-3 translation.
    """
    er = 6378137.  # earth radius (approx.) in meters
    # Use a Mercator projection to get the translation vector
    tx = scale * packet.lon * np.pi * er / 180.
    ty = scale * er * np.log(np.tan((90. + packet.lat) * np.pi / 360.))
    tz = packet.alt
    t = np.array([tx, ty, tz])
    # Use the Euler angles to get the rotation matrix (R = Rz * Ry * Rx).
    Rx = KITTIDataset.rotx(packet.roll)
    Ry = KITTIDataset.roty(packet.pitch)
    Rz = KITTIDataset.rotz(packet.yaw)
    R = Rz.dot(Ry.dot(Rx))
    # Combine the translation and rotation into a homogeneous transform
    return R, t
@staticmethod
def transform_from_rot_trans(R, t):
"""Transformation matrix from rotation matrix and translation vector."""
R = R.reshape(3, 3)
t = t.reshape(3, 1)
return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
@staticmethod
def load_oxts_packets_and_poses(oxts_files):
"""Generator to read OXTS ground truth data.
Poses are given in an East-North-Up coordinate system
whose origin is the first GPS position.
"""
# Scale for Mercator projection (from first lat value)
scale = None
# Origin of the global coordinate system (first GPS position)
origin = None
oxts = []
for filename in oxts_files:
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split()
# Last five entries are flags and counts
line[:-5] = [float(x) for x in line[:-5]]
line[-5:] = [int(float(x)) | |
# well ties
# @author <NAME>, Colorado School of Mines
# @version 01.21.2014
from dtw import *
from tputils import *
from wtutils import *
from imports import *
# Params:
fp = 35   # wavelet peak frequency (Hz) for the synthetic seismograms
q = 100   # constant Q used by the propagator
dz = getDz()
dz2 = getDz2()
# Image warping
dr1 = 0.02 # 2D smoothness vertically
dr2 = 0.50 # 2D smoothness laterally (2nd dim) -- comment fixed, was "vertically"
dr3 = 0.50 # 2D smoothness laterally (3rd dim) -- comment fixed, was "vertically"
r1min,r1max = -0.06,0.12 # vertical strain constraint # use -0.06,0.12,0.02,0.5,-1.0,1.0
r2min,r2max = -0.50,0.50 # lateral (2nd dim) constraint # use -0.06,0.12,0.02,0.5,-1.0,1.0
r3min,r3max = -0.50,0.50 # lateral (3rd dim) constraint # use -0.06,0.12,0.02,0.5,-1.0,1.0
smin,smax = -0.050,0.200 # min and max time shifts for 2D/3D warping
propDir = getPropDir()
csmDir = getCsmDir()
wset = getLogDataset("d")
phase = 44  # wavelet phase in degrees -- NOTE(review): assumed units, confirm
def main(args):
    """Entry point: load the cut seismic image and run the well-tie tests."""
    global s1,s2,s3
    # Subvolume cuts in time (s) and horizontal distance (km).
    cut1=[0.10,1.25];cut2=[3.40,6.65];cut3=[0.6,2.7];
    g,s3,s2,s1,gzi,s1z,go = getImage(normalize=True,\
        cut1=cut1,cut2=cut2,cut3=cut3,rnrm=True,z=True)
    wells3 = [3,12,16,15,7,4,2]; # good wells for 3D (took out 11)
    uwis = deepWellSet()
    # Candidate 2D well subsets.
    wells2a = [3,16,15,7,2];
    wells2b = [3,12,4,11];
    wells2c = [3,16,15,7];
    #goMultipleTies2(g,uwis,wells2a)
    #goMultipleTies2(g,uwis,wells2b)
    #goMultipleTies2(g,uwis,wells2c)
    goAllTest(g,uwis,wells3,wells2a)
    goAllTest(g,uwis,wells3,wells2b)
    ##goMultipleTies3(g,uwis,wells3,s1z,gzi)
    #goSingleTie(g,uwis[15],15)
def goMultipleTies3(g,uwia,wells,s1z,gzi):
    """Tie the selected wells to the 3D image, convert it to depth two ways,
    and plot panel comparisons.

    Fix: the original ended with a bare ``return`` followed by a per-well
    plotting loop that could never execute; that unreachable code (and a
    large block of commented-out alternative plots) has been removed.

    g     -- 3D seismic time image
    uwia  -- full UWI list; wells indexes into it
    wells -- well numbers to tie
    s1z   -- depth sampling of gzi
    gzi   -- depth-converted reference image
    """
    uwis = getUwis(uwia,wells)
    wid = str(wells)
    # Compute well ties
    mwt = MultipleWellTies(uwis,dz,g,s1,s2,s3,wset,csmDir)
    mwt.setStatic(uwis[1]);
    mwt.setStatic(uwis[7]);
    mwt.makeSyntheticSeismograms(propDir,fp,q,phase)
    bgs = 1.0; p0 = 0.0; p1 = 1.0; p2 = 1.0;
    f = mwt.makeInitialSyntheticImage3(bgs,p0,p1,p2)
    h = mwt.computeMultipleWellTies3(r1min,r1max,r2min,r2max,r3min,r3max,\
        dr1,dr2,dr3,smin,smax,f)
    # Compute time-depth volumes from average velocity and from updated ties.
    bgs = 0.5; p0 = 0.0; p1 = 1.0; p2 = 1.0;
    maxz = mwt.getMaxz()
    sd = Sampling(ince(maxz/dz2),dz2,0)
    tzv = mwt.interpolateAverageVelocity3(bgs,p0,p1,p2,sd)
    tzv = smooth23(tzv)
    tzn = mwt.interpolateUpdatedTimeDepths3(bgs,p0,p1,p2,sd)
    tzn = smooth23(tzn)
    gz1 = mwt.convertTimeToDepth3(g,tzv)
    gz2 = mwt.convertTimeToDepth3(g,tzn)
    # Warp the depth images against the reference depth image to measure
    # their disagreement.
    gzic = copy(sd.count,s2.count,s3.count,0,0,0,gzi)
    dwc = DynamicWarpingWT(-0.20,0.20,sd,s2,s3)
    dwc.setSmoothing(0.1,0.5,0.5)
    uc = dwc.findShifts(sd,gz1,sd,gzic)
    # Panel plots for slides.
    ds = 1.345
    plot3P(sd,s2,s3,gzic,"depth image",lims=[0.17,sd.last],z=True,slides='gziP',slc=[ds,5,1.65])
    ds = 1.285
    plot3P(sd,s2,s3,uc,"u",z=True,cmap=jet,cbar="Depth shift (km)",slides='gzdP',slc=[ds,5,1.65])
    plot3P(sd,s2,s3,tzn,"tz1",z=True,cmap=jet,cbar="Time (s)",slides='tz1P',slc=[ds,5,1.65])
    plot3P(sd,s2,s3,tzv,"tz2",z=True,cmap=jet,cbar="Time (s)",slides='tz2P',slc=[ds,5,1.65])
    plot3P(sd,s2,s3,gz1,"depth image",lims=[0.17,sd.last],z=True,slides='gz1P',slc=[ds,5,1.65])
    plot3P(sd,s2,s3,gz2,"depth image",lims=[0.17,sd.last],z=True,slides='gz2P',slc=[ds,5,1.65])
    plot3P(s1,s2,s3,g,"time image",lims=[s1.first,1.2],slides='gztP',slc=[0.85,5,1.65])
def goMultipleTies2(g,uwia,wells):
    """Tie wells along a 2D profile extracted through their surface locations,
    then interpolate velocities/time-depths and plot everything via makePlots2.
    """
    wells = sortWells(wells)
    uwis = getUwis(uwia,wells)
    wid = str(wells)
    # Make 2D seismic image with profile through wells
    x2,x3 = MultipleWellTies(uwis,dz,s1,wset,csmDir).getWellCoordinates()
    lx2,lx3,s2n,x2n = getCurveThruWells(x2,x3,s2.delta,edg=0.1)
    g2 = getImageAlongCurve(g,lx2,lx3)
    # Compute well ties
    mwt = MultipleWellTies(uwis,dz,g2,s1,s2n,wset,csmDir)
    mwt.makeSyntheticSeismograms(propDir,fp,q,phase)
    # Pin well positions to the new 2D coordinate system.
    mwt.fixX2(x2n)
    bgs = 2.0; p0 = 0.0; p1 = 1.0;
    f = mwt.makeInitialSyntheticImage2(bgs,p0,p1)
    h = mwt.computeMultipleWellTies2(r1min,r1max,r2min,r2max,dr1,dr2,smin,smax,f)
    # Compute velocities (initial, updated, and initial-shifted).
    bgs = 1.0; p0 = 0.0; p1 = 1.0;
    vi = mwt.interpolateInitialVelocity2(bgs,p0,p1)
    vn = mwt.interpolateUpdatedVelocity2(bgs,p0,p1)
    vs = mwt.interpolateInitialVelocityTied2(bgs,p0,p1)
    # Compute time-depths on a regular depth sampling.
    bgs = 0.5; p0 = 0.0; p1 = 1.0;
    maxz = mwt.getMaxz()
    sd = Sampling(ince(maxz/dz2),dz2,0)
    #extrapolateTest(mwt,sd)
    tzv = mwt.interpolateAverageVelocity2(bgs,p0,p1,sd)
    #tzv = mwt.maketz2(sd,va)
    tzn = mwt.interpolateUpdatedTimeDepths2(bgs,p0,p1,sd)
    tzi = mwt.interpolateInitialTimeDepths2(bgs,p0,p1,sd)
    # Depth-convert the 2D image with each time-depth field.
    gz1 = mwt.convertTimeToDepth2(g2,tzv)
    gz2 = mwt.convertTimeToDepth2(g2,tzn)
    gz3 = mwt.convertTimeToDepth2(g2,tzi)
    pack2 = [s2n,x2n,f,g2,h,vi,vn,vs,sd,tzv,tzn,tzi,gz1,gz2,gz3,mwt]
    makePlots2(pack2)
def makePlots2(pack2):
    """Plot all 2D tie results packed by goMultipleTies2 (images, velocities,
    time-depths, depth conversions, and logs overlaid in velocity color).
    """
    s2n,x2n,f,g2,h,vi,vn,vs,sd,tzv,tzn,tzi,gz1,gz2,gz3,mwt = pack2
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = getWellTies(mwt)
    #nw = len(x2n)
    #for i in range(nw):
    # ix2 = inro((x2n[i]-s2n.first)/s2n.delta)
    # plotCurve(g2[ix2],s1,h1[i],sh1[i])
    # plotVelPanel(sz[i],tz0[i],v0[i],pv[i],sy12=tz1[i],sy22=v1[i],hlim=[-20,20])
    # Synthetics before/after warping over the seismic.
    plot2DLogs(s1,s2n,g2,sf1,f1,x2n,title='initial synthetics')
    plot2DLogs(s1,s2n,g2,sh1,h1,x2n,title='warped synthetics')
    plotSlice(s2n,s1,g2,title='seismic image')
    plotSlice(s2n,s1,f,title='initial image')
    plotSlice(s2n,s1,h,title='warped image')
    plotSlice(s2n,s1,vi,cmap=jet,cbar="velocity (km/s)",title='initial v')
    plotSlice(s2n,s1,vn,cmap=jet,cbar="velocity (km/s)",title='updated v')
    plotSlice(s2n,s1,vs,cmap=jet,cbar="velocity (km/s)",title='shifted v')
    plotSlice(s2n,sd,tzv,cmap=jet,cbar="time (s)",title='velocity td')
    plotSlice(s2n,sd,tzn,cmap=jet,cbar="time (s)",title='updated td')
    plotSlice(s2n,sd,tzi,cmap=jet,cbar="time (s)",title='initial td')
    plotSlice(s2n,s1,g2,title='seismic image')
    plotSlice(s2n,sd,gz1,title='seismic depth image1')
    plotSlice(s2n,sd,gz2,title='seismic depth image2')
    plotSlice(s2n,sd,gz3,title='seismic depth image3')
    # Convert velocity logs to two-way time for overlay display.
    v0t = makeVot(sz,v0,tz0,sf1)
    v1t = makeVot(sz,v1,tz1,sh1)
    v2t = makeVot(sz,v0,tz1,sh1)
    plot2DLogs(s1,s2n,g2,sf1,v0t,x2n,title='initial v',cbar='Velocity (km/s)',\
        lcmap=jet,lclips=[2.75,5.5],velc=True)
    plot2DLogs(s1,s2n,g2,sh1,v1t,x2n,title='updated v',cbar='Velocity (km/s)',\
        lcmap=jet,lclips=[2.75,5.5],velc=True)
    plot2DLogs(s1,s2n,g2,sh1,v2t,x2n,title='shifted v',cbar='Velocity (km/s)',\
        lcmap=jet,lclips=[2.75,5.5],velc=True)
def goAllTest(g3,uwisa,wells3,wells2):
    """Run the multiple-ties-only test for one 3D well set and one 2D subset."""
    w3 = sortWells(wells3)
    uwis3 = getUwis(uwisa, w3)
    w2 = sortWells(wells2)
    uwis2 = getUwis(uwisa, w2)
    #goSinglesOnly(g3,uwis3,uwis2)
    #goSinglesThenMultiple(g3,uwis3,uwis2)
    goMultipleOnly(g3, uwis3, uwis2, w2=w2)
def goSinglesOnly(g3,uwis,uwis2,new=None):
    """Tie each well to the 3D image independently (no multi-well coupling),
    then plot velocities and log overlays along a profile through uwis2.
    """
    # Do single wellties only
    mwt = MultipleWellTies(uwis,dz,g3,s1,s2,s3,wset,csmDir)
    #mwt.findPhases()
    mwt.setStatic(uwis[2]);
    if new:
        # Force regeneration of cached interpolated logs.
        mwt.makeNewInterpolatedLogs()
    mwt.makeSyntheticSeismograms(propDir,fp,q,phase)
    mwt.computeSingleWellTies(r1min,r1max,-1.0,1.0,dr1)
    title = "swt"
    goVelocitiesAndPlot(g3,mwt,uwis2,title)
    pack = getWellTies(mwt,uwis2)
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = pack
    lx2,lx3,s2n,x2n = getCurveThruWells(x2,x3,s2.delta,edg=0.1)
    g2 = getImageAlongCurve(g3,lx2,lx3)
    # Initial and tied synthetics, with and without background seismic (rmb).
    plot2DLogs(s1,s2n,g2,sf1,f1,x2n,title=title+'_li',slides='li_'+title)
    plot2DLogs(s1,s2n,g2,sh1,h1,x2n,title=title+'_lt',slides='lt_'+title)
    plot2DLogs(s1,s2n,g2,sf1,f1,x2n,title=title+'_li',slides='lio_'+title,rmb=True)
    plot2DLogs(s1,s2n,g2,sh1,h1,x2n,title=title+'_lt',slides='lto_'+title,rmb=True)
def goSinglesThenMultiple(g3,uwis,uwis2,new=None):
    """Tie wells individually first, then refine with the coupled 3D
    multi-well tie, and plot velocities.
    """
    # Do single then multiple wellties
    mwt = MultipleWellTies(uwis,dz,g3,s1,s2,s3,wset,csmDir)
    #mwt.findPhases()
    mwt.makeSyntheticSeismograms(propDir,fp,q,phase)
    mwt.computeSingleThenMultiple()
    mwt.computeSingleWellTies(r1min,r1max,-1.0,1.0,dr1)
    bgs = 1.0; p0 = 0.0; p1 = 1.0; p2 = 1.0;
    if new:
        # Force regeneration of cached intermediate products.
        mwt.makeNewSyntheticImage()
        mwt.makeNewMultipleWellTie()
        mwt.makeNewInterpolatedLogs()
    f3 = mwt.makeInitialSyntheticImage3(bgs,p0,p1,p2)
    h3 = mwt.computeMultipleWellTies3(r1min,r1max,r2min,r2max,r3min,r3max,\
        dr1,dr2,dr3,smin,smax,f3)
    title = "swttmwt"
    #goPlotSynthetics(g3,f3,h3,mwt,uwis2,title)
    goVelocitiesAndPlot(g3,mwt,uwis2,title)
def goMultipleOnly(g3,uwis,uwis2,new=None,w2=None):
    """Run only the coupled 3D multi-well tie and plot the synthetics."""
    # Do multiple wellties only
    mwt = MultipleWellTies(uwis,dz,g3,s1,s2,s3,wset,csmDir)
    mwt.setStatic(uwis[1]);
    # Second static reference only exists for the larger well set.
    if len(uwis)>7: mwt.setStatic(uwis[7]);
    mwt.makeSyntheticSeismograms(propDir,fp,q,phase)
    bgs = 1.0; p0 = 0.0; p1 = 1.0; p2 = 1.0;
    if new:
        # Force regeneration of cached intermediate products.
        mwt.makeNewSyntheticImage()
        mwt.makeNewMultipleWellTie()
        mwt.makeNewInterpolatedLogs()
    f3 = mwt.makeInitialSyntheticImage3(bgs,p0,p1,p2)
    h3 = mwt.computeMultipleWellTies3(r1min,r1max,r2min,r2max,r3min,r3max,\
        dr1,dr2,dr3,smin,smax,f3)
    title = "smwt"
    maxz = mwt.getMaxz()
    sd = Sampling(ince(maxz/dz2),dz2,0)
    #extrapolateTest(mwt,sd)
    goPlotSynthetics(g3,f3,h3,mwt,uwis2,title,w2)
    #goVelocitiesAndPlot(g3,mwt,uwis2,title)
def goVelocitiesAndPlot(g3,mwt,uwis2,title):
    """Interpolate initial/tied/average velocities from the tied wells and
    plot them along the 2D profile through uwis2.
    """
    cv = [2.75,5.5]  # velocity display clips (km/s)
    # Make 2D seismic image with profile through wells
    maxz = mwt.getMaxz()
    sd = Sampling(ince(maxz/dz2),dz2,0)
    pack = getWellTies(mwt,uwis2)
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = pack
    lx2,lx3,s2n,x2n = getCurveThruWells(x2,x3,s2.delta,edg=0.1)
    g2 = getImageAlongCurve(g3,lx2,lx3)
    # Make velocities
    bgs = 1.0; p0 = 0.0; p1 = 1.5; p2 = 1.5;
    vi = mwt.interpolateInitialVelocity3(bgs,p0,p1,p2)
    vn = mwt.interpolateInitialVelocityTied3(bgs,p0,p1,p2)
    va = mwt.interpolateVavg(bgs,p0,p1,p2,sd,None)
    # Extract each 3D field along the well profile.
    vi2 = getImageAlongCurve(vi,lx2,lx3)
    vn2 = getImageAlongCurve(vn,lx2,lx3)
    va2 = getImageAlongCurve(va,lx2,lx3)
    # Log curves converted to time for overlay.
    vi2l = makeVot(sz,v0,tz0,sf1)
    vn2l = makeVot(sz,v0,tz1,sh1)
    makeVelocityPlots(s1,s2n,x2n,sf1,sh1,g2,vi2,vn2,vi2l,vn2l,title,cv)
    plotSlice(s2n,s1,g2,v=va2,title='test'+title,clips2=cv,slides='test')
def makeVelocityPlots(s1,s2n,x2n,sf1,sh1,g2,vi2,vn2,vi2l,vn2l,title,cv):
    """Plot initial vs tied velocity overlays and slices.

    Note: parameter s1 shadows the global s1 here; callers pass the global.
    """
    plot2DLogs(s1,s2n,g2,sf1,vi2l,x2n,title=title+'_linit',cbar='Velocity (km/s)',\
        lcmap=jet,lclips=cv,slides='linit_'+title,velc=True)
    plot2DLogs(s1,s2n,g2,sh1,vn2l,x2n,title=title+'',cbar='Velocity (km/s)',\
        lcmap=jet,lclips=cv,slides='ltied_'+title,velc=True)
    plotSlice(s2n,s1,g2,v=vi2,title='init_'+title,clips2=cv,slides='init_'+title)
    plotSlice(s2n,s1,g2,v=vn2,title='tied_'+title,clips2=cv,slides='tied_'+title)
def goPlotSynthetics(g3,f3,h3,mwt,uwis2,title,w2=None):
    """Plot seismic, initial and warped synthetic images/logs along the well
    profile; optionally annotate with formation tops and horizons (w2).
    """
    pack = getWellTies(mwt,uwis2)
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = pack
    lx2,lx3,s2n,x2n = getCurveThruWells(x2,x3,s2.delta,edg=0.1)
    g2 = getImageAlongCurve(g3,lx2,lx3)
    f2 = getImageAlongCurve(f3,lx2,lx3)
    h2 = getImageAlongCurve(h3,lx2,lx3)
    plotSlice(s2n,s1,g2,title='seismic_'+title,slides='seismic_'+title)
    plotSlice(s2n,s1,h2,title='wsi_'+title,slides='wsi_'+title)
    plotSlice(s2n,s1,f2,title='si_'+title,slides='si_'+title)
    # Log overlays, with and without background seismic (rmb).
    plot2DLogs(s1,s2n,g2,sf1,f1,x2n,title=title+'_li',slides='li_'+title)
    plot2DLogs(s1,s2n,g2,sh1,h1,x2n,title=title+'_lt',slides='lt_'+title)
    plot2DLogs(s1,s2n,g2,sf1,f1,x2n,title=title+'_li',slides='lio_'+title,rmb=True)
    plot2DLogs(s1,s2n,g2,sh1,h1,x2n,title=title+'_lt',slides='lto_'+title,rmb=True)
    #plotMultipleWellTiesIndiv(mwt,pack,h3)
    if w2:
        # Formation tops and 2D horizons along the profile.
        tops,fmsp,datums = getWellTops(w2)
        gh2 = get2DHorizons(lx2,lx3)
        plot2DLogs(s1,s2n,g2,sh1,h1,x2n,gh=gh2,tops=tops,fmsp=fmsp,tz=tz1,sz=sz,slides='synt_fmshzs')
        plot2DLogs(s1,s2n,g2,sf1,f1,x2n,gh=gh2,tops=tops,fmsp=fmsp,tz=tz0,sz=sz,slides='syni_fmshzs')
        #goTopAnalysis(tops,sf1,f1,tz0,sz,gh2)
def goTopAnalysis(tops,sf1,f1,tz0,sz,gh2):
    """Stub for comparing formation tops with horizons.

    NOTE(review): this function only binds locals and has no effect yet —
    it appears to be unfinished work.
    """
    fms = ["DKOT","LKOT","CRMT","RDPK","A Sand","C1 Dolo"]
    ght,ghy,csty = gh2
def plotSingleWellTies(mwt):
    """Plot the tied synthetic against the nearest seismic trace, plus a
    velocity/time-depth panel, for every tied well.
    """
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = getWellTies(mwt)
    g3 = mwt.getSeismic3()
    nw = len(sf1)
    for i in range(nw):
        # Nearest trace indices for this well's surface location.
        ix2 = inro((x2[i]-s2.first)/s2.delta)
        ix3 = inro((x3[i]-s3.first)/s3.delta)
        plotCurve(g3[ix3][ix2],s1,h1[i],sh1[i])
        plotVelPanel(sz[i],tz0[i],v0[i],pv[i],sy12=tz1[i],sy22=v1[i],hlim=[-20,20])
def plotMultipleWellTiesIndiv(mwt,pack,h3):
    """Plot, per well, the seismic trace against the corresponding trace of
    the warped synthetic image h3.
    """
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = pack
    g3 = mwt.getSeismic3()
    nw = len(sf1)
    for i in range(nw):
        # Nearest trace indices for this well's surface location.
        ix2 = inro((x2[i]-s2.first)/s2.delta)
        ix3 = inro((x3[i]-s3.first)/s3.delta)
        plotCurve(g3[ix3][ix2],s1,h3[ix3][ix2],s1)
        #plotVelPanel(sz[i],tz0[i],v0[i],pv[i],sy12=tz1[i],sy22=v1[i],hlim=[-20,20])
def extrapolateTest(mwt,sd):
    """Test linear extrapolation of time-depth functions and average
    velocities onto the regular depth sampling sd, then plot the curves for
    all wells and for one example well (index 3).

    Fixes: the example-well extraction assigned ``tz0 = [tz1[ex]]`` so the
    "extz0" slide silently duplicated "extz1"; it now uses ``tz0[ex]``.
    Stale commented-out experiment code was removed.
    """
    va1,va2,sza,sda,tz0,tz1,tz2,lea,vle = [],[],[],[],[],[],[],[],[]
    for well in mwt.wellties:
        # Extrapolate the updated time-depth tz1 onto sd.
        mwt.tzeXOn()
        le = mwt.extrapolateLinearX(well.sz,sd,well.tz1)
        # Average velocity from the time-depth function: v = 2*z/t.
        vf = div(mul(2,floats(well.sz.getValues())),well.tz1)
        mwt.tzeXOff()
        ve = mwt.extrapolateLinearX(well.sz,sd,vf)
        # Average velocity implied by the extrapolated time-depth le.
        vle1 = zerofloat(sd.count)
        for i in range(1,len(vle1)):
            vle1[i] = 2*sd.getValue(i)/le[i]
        vle1[0] = vle1[1]  # avoid the 0/0 sample at z=0
        lea.append(le)
        vle.append(vle1)
        sza.append(well.sz)
        sda.append(sd)
        va1.append(vf)
        va2.append(ve)
        tz0.append(well.tz0)
        tz1.append(well.tz1)
        tz2.append(div(mul(2,floats(sd.getValues())),ve))
    # All-well overlays.
    plotAllVavgCurves(sza,va1,flz=0,slides="vf")
    plotAllVavgCurves(sda,va2,flz=0,slides="ve")
    plotAllTDCurves(sza,tz0,flz=0,slides="tz0")
    plotAllTDCurves(sza,tz1,flz=0,slides="tz1")
    plotAllTDCurves(sda,tz2,flz=0,slides="tz1e")
    plotAllTDCurves(sda,lea,flz=0,slides="tz1el")
    plotAllVavgCurves(sda,vle,flz=0,slides="vel")
    # Single example well.
    ex = 3
    sda = [sda[ex]]
    sza = [sza[ex]]
    tz0 = [tz0[ex]]  # fixed: was tz1[ex], which duplicated the extz1 plot
    tz1 = [tz1[ex]]
    tz2 = [tz2[ex]]
    lea = [lea[ex]]
    vle = [vle[ex]]
    ex = str(ex)
    plotAllTDCurves(sza,tz0,flz=0,slides="extz0_"+ex)
    plotAllTDCurves(sza,tz1,flz=0,slides="extz1_"+ex)
    plotAllTDCurves(sda,tz2,flz=0,slides="extz2_"+ex)
    plotAllTDCurves(sda,lea,flz=0,slides="exlea_"+ex)
    plotAllVavgCurves(sda,vle,flz=0,slides="exvle_"+ex)
def computePredErrors(mwt,sd):
    """Experimental: fit local prediction-filter coefficients to each well's
    average-velocity trend, average them across wells, and apply them to
    predict/extrapolate velocities on sd. Returns (per-well predictions, ad).

    NOTE(review): the averaging loop reuses nzl from the LAST well of the
    accumulation loop, and ix is initialized to 1 (so "if ix[i]>0" is always
    true) — both look suspect; confirm intent before relying on this.
    """
    vea = []
    sig = 100  # smoothing half-width for the local prediction filter
    nz = sd.count
    ad = zerofloat(nz)   # accumulated/averaged filter coefficients
    ix = fillfloat(1,nz) # per-sample contribution counts
    # compute the coefficients
    lpf = LocalPredictionFilter(sig,2,16)
    for well in mwt.wellties:
        tzr = mwt.resampleLinear(well.sz,sd,well.tz1)
        sz2 = Sampling(len(tzr),sd.delta,well.sz.first)
        ifx = sd.indexOfNearest(well.sz.first)
        nzl = len(tzr)
        vf = div(mul(2,floats(sz2.getValues())),tzr)
        vft = lpf.getTrend(vf)
        vf0 = sub(vf,vft)
        a = lpf.calculateCoeff(vft)
        for i in range(nzl):
            ad[i+ifx] += a[i]
            ix[i+ifx] += 1
    # average the coefficients
    for i in range(nzl):
        if ix[i]>0:
            ad[i] /= ix[i]
    # apply the coefficients
    for well in mwt.wellties:
        tzr = mwt.resampleLinear(well.sz,sd,well.tz1)
        nzl = len(tzr)
        ifx = sd.indexOfNearest(well.sz.first)
        lfx = sd.indexOfNearest(well.sz.last)
        sz2 = Sampling(len(tzr),sd.delta,well.sz.first)
        vf = div(mul(2,floats(sz2.getValues())),tzr)
        vft = lpf.getTrend(vf)
        vf0 = vf #sub(vf,vft)
        ve = zerofloat(nz)
        ne = nz-ifx-nzl  # samples below the logged interval
        for i in range(nzl):
            ve[i+ifx] = vf0[i]*ad[i+ifx]
        # Extrapolate below and above the logged interval with the filter.
        for i in range(ne):
            for k in range(nzl):
                ve[i+lfx] += ad[i+lfx]*ve[ifx+nzl+i-k]
        for i in range(ifx):
            for k in range(nzl):
                ve[ifx-i] += ad[ifx-i]*ve[ifx-i+k]
        vea.append(ve)
        SimplePlot.asPoints(ve)
    return vea,ad
def getLogsAlongCurve(v,x2,s2n,sh):
    """Extract, for each well position x2[i], the portion of the image trace
    spanned by that well's log sampling sh[i].

    Uses the globals s1 (vertical sampling) and the JTK helpers inro/copy.
    """
    f1 = s1.first
    d1 = s1.delta
    logs = []
    for i, xw in enumerate(x2):
        # Nearest trace to this well in the profile coordinate system.
        trace = v[inro((xw - s2n.first) / s2n.delta)]
        n1 = sh[i].count
        j1 = inro((sh[i].first - f1) / d1)
        logs.append(copy(n1, j1, trace))
    return logs
def getWellTies(mwt,uwis=None):
    """Collect per-well tie results from mwt.wellties as parallel lists.

    When uwis is given, only wells whose ids match uwis, in order, are
    collected and the scan stops after the last requested well.

    Fix: the loop-exit test evaluated ``len(uwis)`` unconditionally, which
    raised TypeError for the common no-filter calls ``getWellTies(mwt)``;
    the length test is now guarded by ``uwis``.

    Returns (f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3).
    """
    f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3 = \
        [],[],[],[],[],[],[],[],[],[],[],[],[]
    i=0
    for well in mwt.wellties:
        # Skip wells not matching the next requested uwi.
        if uwis and not uwis[i]==well.id: continue
        f1.append(well.f)
        h1.append(well.h)
        sf1.append(well.sf)
        sh1.append(well.sh)
        u1.append(well.u)
        v0.append(well.v)
        v1.append(well.v1)
        tz0.append(well.tz0)
        tz1.append(well.tz1)
        sz.append(well.sz)
        pv.append(well.pv)
        x2.append(well.x2f)
        x3.append(well.x3f)
        i+=1
        if uwis and i==len(uwis): break
    return f1,h1,sf1,sh1,u1,v0,v1,tz0,tz1,sz,pv,x2,x3
def goSingleTie(g,uwi,well):
    """Tie one well to the seismic and plot the before/after curves and
    velocity panel for slides.
    """
    wid = str(well)
    wt = WellTie(uwi,dz,s1,wset)
    wt.makePropagatorSeismogram(propDir,fp,q,phase)
    #wt.makePropagatorSeismogram(propDir,fp,q,0)
    x3 = wt.x3f; x2 = wt.x2f
    # Average a few traces around the well location.
    gi,ix2,ix3 = getTraces(g,x2,x3,2)
    #wt.findPhase()
    wt.computeSingleWellTie(gi,r1min,r1max,smin,smax,dr1)
    # Unpack tie results: initial/tied synthetics, samplings, velocities.
    tz0 = wt.tz0
    f = wt.f
    sf = wt.sf
    sz = wt.sz
    tz1 = wt.tz1
    h = wt.h
    sh = wt.sh
    v0 = wt.v
    v1 = wt.v1
    pv = wt.pv
    u = wt.u
    php = wt.getPhasePlot()
    gi0 = gi[0]
    # Initial synthetic re-anchored at the tied start time.
    sft = Sampling(sf.count,sf.delta,sh.first)
    #plotPhaseRMS(php,len(php),1,slides='phaseplotswt'+wid)
    lim = [s1.first,s1.last]
    plotCurve(gi0,s1,slides="swt_seis"+wid)
    plotCurve(f,sf,slides="swt_untie"+wid,vlim=lim)
    plotCurve(f,sft,slides="swt_shift"+wid,vlim=lim)
    plotCurve(h,sh,slides="swt_tie"+wid,vlim=lim)
    plotCurve(gi0,s1,f,sf,slides="swt_untie_seis_"+wid)
    plotCurve(gi0,s1,h,sh,slides="swt_tie_seis_"+wid)
    plotVelPanel(sz,tz0,v0,pv,sy12=tz1,sy22=v1,hlim=[-15,15],slides="tzp"+wid)
    plotLogPanel(sz,v0,wt.d,wt.r,slides="logpanel"+wid)
    #pack
'''
<plugin key="HivePlug" name="Hive Plugin" author="imcfarla, MikeF and roadsnail" version="2.1-urllib" wikilink="http://www.domoticz.com/wiki/plugins" externallink="https://github.com/imcfarla2003/domoticz-hive">
<description>
<h2>Hive Plugin</h2>
<h3>Features</h3>
<ul style="list-style-type:square">
<li>Thermostats (including multizone)</li>
<li>Active lights</li>
<li>Warm to Cool lights</li>
<li>Colour lights</li>
<li>Activeplugs</li>
</ul>
<h3>To Do</h3>
<ul style="list-style-type:square">
<li>Allow to choose the heating mode from boost, scheduled, manual and off</li>
</ul>
</description>
<params>
<param field="Username" label="Hive Username" width="200px" required="true" default=""/>
<param field="Password" label="Hive Password" width="200px" required="true" default=""/>
<param field="Mode1" label="Heartbeat Multiplier" width="30px" required="true" default="1"/>
<param field="Mode2" label="Domoticz Port - only needed prior to version 3.8791" width="40px" required="false" default="8080"/>
<param field="Mode3" label="Postcode" width="100px" required="false" default=""/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="None" value="0" default="true" />
<option label="Python Only" value="2"/>
<option label="Basic Debugging" value="62"/>
<option label="Basic+Messages" value="126"/>
<option label="Connections Only" value="16"/>
<option label="Connections+Queue" value="144"/>
<option label="All" value="-1"/>
</options>
</param>
</params>
</plugin>
'''
try:
import json
import math
from urllib.request import Request, urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
import Domoticz
import boto3
from pycognito.aws_srp import AWSSRP
except ImportError as L_err:
print("ImportError: {0}".format(L_err))
raise L_err
class BasePlugin:
    """Domoticz plugin object for Hive devices (thermostats, lights, plugs,
    relays); Domoticz calls the on* callbacks defined below.
    """
    # Legacy template flag; not read anywhere in the visible code.
    enabled = False
def __init__(self):
self.sessionId = ''
self.counter = 0
self.multiplier = 10
self.lightsSet = set()
self.activeplugsSet = set()
self.hwrelaySet = set()
self.chrelaySet = set()
self.TimedOutAvailable = False
self.RefreshToken = ''
self.AccessToken = ''
self.Honeycomb = ''
self.Collect = 0
def onStart(self):
    """Configure debugging, read parameters, authenticate against the Hive
    AWS Cognito pool, and run an immediate first heartbeat.
    """
    Domoticz.Log('Starting')
    # Mode6 selects the Domoticz debug level (string values map to flags).
    if Parameters["Mode6"] != "0":
        if Parameters["Mode6"] == "Normal":
            Domoticz.Debugging(0)
        elif Parameters["Mode6"] == "null":
            Domoticz.Debugging(0)
        elif Parameters["Mode6"] == "Debug":
            Domoticz.Debugging(-1)
        else:
            Domoticz.Debugging(int(Parameters["Mode6"]))
    if int(self.getDomoticzRevision()) >= 8651:
        # Devices[unit].TimedOut only appears in Revision >= 8651
        self.TimedOutAvailable = True
        Domoticz.Log("TimedOut available")
    else:
        Domoticz.Log("TimedOut not available: " + self.getDomoticzRevision())
    self.multiplier = int(Parameters['Mode1'])
    self.counter = self.multiplier # update immediately
    if self.sessionId == '':
        Domoticz.Log('Creating Session')
        # Fixed Hive Cognito pool/client identifiers.
        region='eu-west-1'
        user_pool_id='eu-west-1_SamNfoWtf'
        client_id='3rl4i0ajrmtdm8sbre54p9dvd9'
        client = boto3.client('cognito-idp', region_name=region)
        # SRP login with the user's Hive credentials.
        aws = AWSSRP(username=Parameters["Username"], password=Parameters["Password"], pool_id=user_pool_id,client_id=client_id, client=client)
        tokens = aws.authenticate_user()
        self.sessionId = tokens['AuthenticationResult']['IdToken']
        self.RefreshToken = tokens['AuthenticationResult']['RefreshToken']
        self.AccessToken = tokens['AuthenticationResult']['AccessToken']
        self.GetSessionID()
    Domoticz.Debug(self.sessionId)
    # Populate devices right away instead of waiting for the first beat.
    self.onHeartbeat()
def onStop(self):
    """Domoticz shutdown callback; nothing to clean up, just log."""
    Domoticz.Log('onStop Called')
def onConnect(self, Connection, Status, Description):
    """Unused connection callback (plugin uses urllib, not Domoticz connections)."""
    Domoticz.Debug('onConnect called')
def onMessage(self, Connection, Data):
    """Unused message callback (plugin uses urllib, not Domoticz connections)."""
    Domoticz.Debug('onMessage called')
def onCommand(self, Unit, Command, Level, Hue):
    """Translate a Domoticz command into a Hive API payload and PUT it to
    the node endpoint; the response is used to refresh device state.
    """
    Domoticz.Log('onCommand called for Unit ' + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
    Domoticz.Debug(str(Devices[Unit].Type))
    Domoticz.Debug(str(Devices[Unit].SubType))
    Domoticz.Debug(Devices[Unit].DeviceID)
    Domoticz.Debug(str(Devices[Unit].sValue))
    payload = ""
    if self.isLight(Unit):
        Domoticz.Log("Setting Light Parameters")
        if str(Command) == "Set Level":
            payload = self.CreateLightPayload("ON", Level)
        if str(Command) == "On":
            payload = self.CreateLightPayload("ON", Devices[Unit].LastLevel)
        if str(Command) == "Off":
            payload = self.CreateLightPayload("OFF", Devices[Unit].LastLevel)
        if str(Command) == "Set Color":
            Domoticz.Debug(Hue)
            colourDict = json.loads(Hue)
            colourMode = colourDict.get("m")
            if colourMode == 2:
                # white temp
                # Map Domoticz 0-255 temp to Hive's Kelvin range.
                colourTemp = 6533-(colourDict.get("t")*15)
                Domoticz.Debug(str(colourTemp))
                payload = self.CreateLightPayload("ON", Level, "TUNABLE", colourTemp)
            elif colourMode == 3:
                # rgb colour
                h, s, v = rgb2hsv(colourDict.get("r"),colourDict.get("g"),colourDict.get("b"))
                Domoticz.Debug(str(h) + " " + str(s) + " " +str(v))
                payload = self.CreateLightPayload("ON", Level, "COLOUR", h, s)
            else:
                Domoticz.Log("Colour Mode not supported: " + str(colourMode))
    elif self.isThermostat(Unit):
        Domoticz.Log("Setting Thermostat Level")
        payload = self.CreateThermostatPayload(Level)
    elif self.isActivePlug(Unit):
        Domoticz.Log("Setting ActivePlug State")
        if str(Command) == "On":
            payload = self.CreateActivePlugPayload("ON")
        if str (Command) == "Off":
            payload = self.CreateActivePlugPayload("OFF")
    elif self.isHotWaterRelay(Unit):
        Domoticz.Log("Setting Hot Water Relay State")
        if str(Command) == "On":
            payload = self.CreateHotWaterPayload("HEAT") # Android APP Shows as On
        if str(Command) == "Off":
            payload = self.CreateHotWaterPayload("OFF") # Android APP shows as Off
    elif self.isCentralHeatingRelay(Unit):
        Domoticz.Log("Setting Central Heating Relay State")
        if str(Command) == "On":
            payload = self.CreateCentralHeatingPayload("HEAT") # Android APP Shows as Manual (Governed by Thermostat setting)
        if str(Command) == "Off":
            payload = self.CreateCentralHeatingPayload("OFF") # Android APP shows as Off
    else:
        Domoticz.Log("Unknown Device Type")
    if payload != "":
        headers = {'Content-Type': 'application/vnd.alertme.zoo-6.2+json',
                   'Accept': 'application/vnd.alertme.zoo-6.2+json',
                   'X-AlertMe-Client': 'swagger',
                   'X-Omnia-Access-Token': self.sessionId}
        url = self.Honeycomb + '/omnia/nodes/' + Devices[Unit].DeviceID
        req = Request(url, data = json.dumps(payload).encode('ascii'), headers = headers, unverifiable = True)
        # urllib only does GET/POST natively; force a PUT.
        req.get_method = lambda : 'PUT'
        try:
            r = urlopen(req).read().decode('utf-8')
            # Process the update sent back from Hive
            d = json.loads(r)['nodes']
            self.UpdateDeviceState(d)
        except Exception as e:
            Domoticz.Log(str(e))
    else:
        Domoticz.Log(str(Command) + " not handled")
def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
    """Log incoming Domoticz notifications; no further action is taken."""
    Domoticz.Debug('Notification: ' + Name + ',' + Subject + ',' + Text + ',' + Status + ',' + str(Priority) + ',' + Sound + ',' + ImageFile)
def onDisconnect(self, Connection):
    """Unused disconnect callback (plugin uses urllib, not Domoticz connections)."""
    Domoticz.Debug('onDisconnect called')
def onHeartbeat(self):
    """Poll Hive every `multiplier` heartbeats, update device state, and
    maintain the optional outside-temperature device (Mode3 postcode).

    Fix: the collection-mode branch used a bare ``exit`` (a no-op name
    reference, not a call), so despite the "quit now" comment it fell
    through and still updated device state; it now returns early.
    """
    Domoticz.Debug('onHeartbeat called')
    if self.counter >= self.multiplier:
        Domoticz.Debug('Getting Data')
        self.counter = 1
        d = self.GetDevices()
        if self.Collect == 1:
            self.nodes = d
            # quit now as we are just collecting the data
            return
        self.UpdateDeviceState(d)
        if Parameters["Mode3"] != "": #if postcode parameter set for Hive outside temp then....
            foundOutsideDevice = False
            w = self.GetWeatherURL()
            if w != False:
                outsidetemp = w["temperature"]["value"]
                for unit in Devices:
                    if Devices[unit].DeviceID == "Hive_Outside":
                        Devices[unit].Update(nValue=int(outsidetemp), sValue=str(outsidetemp))
                        foundOutsideDevice = True
                if foundOutsideDevice == False:
                    # Create the outside-temperature device on first use.
                    Domoticz.Device(Name = 'Outside',
                                    Unit = self.GetNextUnit(False),
                                    TypeName = 'Temperature',
                                    DeviceID = 'Hive_Outside').Create()
                    # Force a refresh on the next heartbeat so it gets a value.
                    self.counter = self.multiplier
    else:
        self.counter += 1
    Domoticz.Debug('Counter = ' + str(self.counter))
def GetThermostat(self, d, ttype):
#ttype can be 'Heating' or 'HotWater'
thermostats = False
k = 'state'+ttype+'Relay'
x = find_key_in_list(d, 'http://alertme.com/schema/json/node.class.thermostat.json#')
if x:
for i in x:
if k in i['attributes']:
if thermostats:
thermostats.append(i)
else:
thermostats = [i]
return thermostats
    def GetThermostatUI(self, d, parentNodeId):
        """
        Find the thermostat UI node bound to the given thermostat node.
        :param d: node list returned by the Hive API
        :param parentNodeId: parentNodeId of the thermostat being matched
        :return: matching thermostatui node dict, or False if none was found
        """
        thermostatui = False
        x = find_key_in_list(d, 'http://alertme.com/schema/json/node.class.thermostatui.json#')
        if not x: # Try a Hive2 thermostat
            x = find_key_in_list(d,"Hive2")
        if x:
            for i in x:
                try:
                    # A UI node belongs to the thermostat whose id appears in
                    # its first boundNodes relationship entry.
                    if i["relationships"]["boundNodes"][0]["id"] == parentNodeId:
                        thermostatui = i
                except Exception as e:
                    Domoticz.Debug("Thermostatui - No boundNodes under relationship")
                    # Fallback: with a single UI node there is no ambiguity,
                    # so use it even without relationship data.
                    if len(x) == 1:
                        Domoticz.Debug("Only one thermostatui node so using that")
                        thermostatui = x[0]
        return thermostatui
def GetLights(self, d):
lights = False
x = find_key_in_list(d,"http://alertme.com/schema/json/node.class.light.json#")
if x:
lights = x
return lights
def GetTunableLights(self, d):
lights = False
x = find_key_in_list(d,"http://alertme.com/schema/json/node.class.tunable.light.json#")
if x:
lights = x
return lights
def GetColourLights(self, d):
lights = False
x = find_key_in_list(d,"http://alertme.com/schema/json/node.class.colour.tunable.light.json#")
if x:
lights = x
return lights
def GetActivePlugs(self, d):
activeplugs = False
x = find_key_in_list(d,"http://alertme.com/schema/json/node.class.smartplug.json#")
if x:
activeplugs = x
return activeplugs
    def GetNextUnit(self, unit):
        # Recursively find the next unused Domoticz unit number.
        # Call with False (or 0) to start probing from len(Devices) + 1.
        if not unit:
            nextUnit = len(Devices) + 1
        else:
            nextUnit = unit +1
        # Probe upwards while the candidate is taken or non-positive.
        # NOTE: because the Log call sits after the recursive call returns,
        # "Unit N" is logged once per recursion level, always with the final N.
        if nextUnit in Devices or nextUnit < 1:
            nextUnit = self.GetNextUnit(nextUnit)
        Domoticz.Log("Unit " + str(nextUnit))
        return nextUnit
def UpdateDeviceState(self, d):
foundHotWaterDevice = False
foundOutsideDevice = False
Domoticz.Debug('Getting Temperatures')
thermostats = self.GetThermostat(d, 'Heating')
if thermostats:
for node in thermostats:
# TODO: add schedule switch to change toggle scheduled or off (chrelay switch toggles manual or off)
# TODO: add switch for boost mode (fix time to half an hour)
foundInsideDevice = False
foundTargetDevice = False
foundHeatingDevice = False
foundThermostatDevice = False
# get the temperature and heating states
temp = node["attributes"]["temperature"]["reportedValue"]
Domoticz.Debug('Temp = ' + str(temp))
targetTemp = node["attributes"]["targetHeatTemperature"]["reportedValue"]
if targetTemp < 7.0: targetTemp = 7.0
Domoticz.Debug('Target = ' + str(targetTemp))
heating = node["attributes"]["stateHeatingRelay"]["reportedValue"]
Domoticz.Debug('Heating = ' + heating)
Domoticz.Debug('Getting Battery Status')
thermostatui = self.GetThermostatUI(d, node["parentNodeId"])
if (thermostatui):
# get the battery and rssi values
thermostat_battery = thermostatui["attributes"]["batteryLevel"]["reportedValue"]
Domoticz.Debug('Battery = ' + str(thermostat_battery))
thermostat_rssi = 12*((0 - thermostatui["attributes"]["RSSI"]["reportedValue"])/100)
Domoticz.Debug('RSSI = ' + str(thermostat_rssi))
# Loop through the devices and update temperatures
Domoticz.Debug('Updating Devices')
for unit in Devices:
if Devices[unit].DeviceID == node["name"]+"_Current":
Devices[unit].Update(nValue=int(temp), sValue=str(temp))
foundInsideDevice = True
if Devices[unit].DeviceID == node["name"]+"_Target":
Devices[unit].Update(nValue=int(targetTemp), sValue=str(targetTemp))
foundTargetDevice = True
if Devices[unit].DeviceID == node["id"] and Devices[unit].Type == 244: #if CH Switch device
foundHeatingDevice = True
if unit not in set(self.chrelaySet):
self.chrelaySet.add(unit)
if (thermostatui):
if thermostatui["attributes"]["presence"]["reportedValue"] == "ABSENT":
if self.TimedOutAvailable:
if Devices[unit].TimedOut == 0:
Devices[unit].Update(nValue=Devices[unit].nValue, sValue=Devices[unit].sValue, TimedOut=1)
else:
Domoticz.Log("Device Offline : " + Devices[unit].Name)
else:
if heating == 'ON':
if Devices[unit].nValue == 0:
if self.TimedOutAvailable:
Devices[unit].Update(nValue=1, sValue='On', TimedOut=0)
else:
Devices[unit].Update(nValue=1, sValue='On')
else:
if Devices[unit].nValue == 1:
if self.TimedOutAvailable:
Devices[unit].Update(nValue=0, sValue='Off', TimedOut=0)
else:
Devices[unit].Update(nValue=0, sValue='Off')
if Devices[unit].DeviceID == node['id'] and Devices[unit].Type == 242: #Thermostat
foundThermostatDevice = True
Devices[unit].Update(nValue = int(targetTemp),
sValue = str(targetTemp),
BatteryLevel = int(thermostat_battery),
SignalLevel = int(thermostat_rssi))
if foundInsideDevice == False and thermostatui:
Domoticz.Device(Name = thermostatui["name"] + ' - Current',
Unit = self.GetNextUnit(False),
TypeName = 'Temperature',
DeviceID = node["name"]+'_Current').Create()
self.counter = self.multiplier
if foundTargetDevice == False and thermostatui:
Domoticz.Device(Name = thermostatui["name"] + ' - Target',
Unit = self.GetNextUnit(False),
TypeName = 'Temperature',
DeviceID = node["name"]+'_Target').Create()
self.counter = self.multiplier
if foundHeatingDevice == False and thermostatui:
Domoticz.Device(Name | |
= m.get('telephone')
if m.get('gmt_modified') is not None:
self.gmt_modified = m.get('gmt_modified')
if m.get('gmt_create') is not None:
self.gmt_create = m.get('gmt_create')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
return self
class PaidSpecInstance(TeaModel):
    """
    Paid specification instance.

    Describes one purchased spec: its id, name, configuration description,
    validity window (start/end time) and current running status.
    """

    # Map keys handled uniformly by to_map()/from_map()
    _FIELDS = ('spec_instance_id', 'spec_name', 'spec_conf',
               'start_time', 'end_time', 'status')

    def __init__(
        self,
        spec_instance_id: str = None,
        spec_name: str = None,
        spec_conf: str = None,
        start_time: str = None,
        end_time: str = None,
        status: str = None,
    ):
        # Spec instance id
        self.spec_instance_id = spec_instance_id
        # Spec name
        self.spec_name = spec_name
        # Spec configuration (description)
        self.spec_conf = spec_conf
        # Instance start time
        self.start_time = start_time
        # Instance expiry time
        self.end_time = end_time
        # Running status of the spec instance
        self.status = status

    def validate(self):
        """id, name and status are mandatory; the remaining fields are optional."""
        for required in ('spec_instance_id', 'spec_name', 'status'):
            self.validate_required(getattr(self, required), required)

    def to_map(self):
        """Serialize to a plain dict, omitting fields that are None."""
        result = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a plain dict, ignoring missing/None keys; returns self."""
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class MainOrderVO(TeaModel):
    """
    Main order view object.

    Carries the buyer/seller identities, money, lifecycle timestamps,
    status/type metadata and the Pair lists describing spec properties
    and extension parameters of a main order.
    """

    # All map keys in the exact order the original code emitted them;
    # validate()/to_map()/from_map() iterate this to stay faithful to it.
    _MAP_ORDER = (
        'order_no', 'buyer', 'seller', 'sp_id', 'login_account',
        'product_name', 'money', 'gmt_create', 'gmt_modified', 'gmt_close',
        'gmt_pay_success', 'order_status', 'order_type', 'order_origin',
        'order_level', 'pay_method', 'order_duration', 'parent_order_no',
        'commodity_name', 'spec_code', 'spec_name', 'spec_desc',
        'order_detail', 'sub_order_include', 'spec_props', 'ext_params',
    )
    # Fields holding lists of Pair sub-models
    _PAIR_LIST_FIELDS = ('spec_props', 'ext_params')
    # Timestamp fields validated against the ISO-8601 pattern below
    _TIME_FIELDS = ('gmt_create', 'gmt_modified', 'gmt_close', 'gmt_pay_success')
    _TIME_PATTERN = '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})'

    def __init__(
        self,
        order_no: str = None,
        buyer: str = None,
        seller: str = None,
        sp_id: str = None,
        login_account: str = None,
        product_name: str = None,
        money: MultiCurrencyMoneyVO = None,
        gmt_create: str = None,
        gmt_modified: str = None,
        gmt_close: str = None,
        gmt_pay_success: str = None,
        order_status: str = None,
        order_type: str = None,
        order_origin: str = None,
        order_level: str = None,
        pay_method: str = None,
        order_duration: str = None,
        parent_order_no: str = None,
        commodity_name: str = None,
        spec_code: str = None,
        spec_name: str = None,
        spec_desc: str = None,
        order_detail: str = None,
        sub_order_include: bool = None,
        spec_props: List[Pair] = None,
        ext_params: List[Pair] = None,
    ):
        self.order_no = order_no
        self.buyer = buyer
        self.seller = seller
        self.sp_id = sp_id
        self.login_account = login_account
        self.product_name = product_name
        self.money = money
        self.gmt_create = gmt_create
        self.gmt_modified = gmt_modified
        self.gmt_close = gmt_close
        self.gmt_pay_success = gmt_pay_success
        self.order_status = order_status
        self.order_type = order_type
        self.order_origin = order_origin
        self.order_level = order_level
        self.pay_method = pay_method
        self.order_duration = order_duration
        self.parent_order_no = parent_order_no
        self.commodity_name = commodity_name
        self.spec_code = spec_code
        self.spec_name = spec_name
        self.spec_desc = spec_desc
        self.order_detail = order_detail
        self.sub_order_include = sub_order_include
        self.spec_props = spec_props
        self.ext_params = ext_params

    def validate(self):
        """
        Every field is mandatory. Timestamps must additionally match the
        ISO-8601 pattern; the money model and each Pair are validated
        recursively.
        """
        for name in self._MAP_ORDER:
            value = getattr(self, name)
            self.validate_required(value, name)
            if name == 'money':
                if value:
                    value.validate()
            elif name in self._TIME_FIELDS:
                if value is not None:
                    self.validate_pattern(value, name, self._TIME_PATTERN)
            elif name in self._PAIR_LIST_FIELDS:
                if value:
                    for pair in value:
                        if pair:
                            pair.validate()

    def to_map(self):
        """
        Serialize to a plain dict. None scalars are skipped; the two Pair
        list keys are always present (possibly as empty lists), matching
        the original generated serializer.
        """
        result = dict()
        for name in self._MAP_ORDER:
            value = getattr(self, name)
            if name == 'money':
                if value is not None:
                    result[name] = value.to_map()
            elif name in self._PAIR_LIST_FIELDS:
                result[name] = []
                if value is not None:
                    for pair in value:
                        result[name].append(pair.to_map() if pair else None)
            elif value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """
        Populate this instance from a plain dict, rebuilding the nested
        money model and Pair lists; returns self for chaining.
        """
        m = m or dict()
        for name in self._MAP_ORDER:
            if name == 'money':
                if m.get(name) is not None:
                    self.money = MultiCurrencyMoneyVO().from_map(m[name])
            elif name in self._PAIR_LIST_FIELDS:
                items = []
                if m.get(name) is not None:
                    for raw in m.get(name):
                        items.append(Pair().from_map(raw))
                setattr(self, name, items)
            elif m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class CommodityVO(TeaModel):
    """
    Commodity view object: code, name, service type, source and description.
    """

    # Map keys handled uniformly by validate()/to_map()/from_map()
    _FIELDS = ('commodity_code', 'commodity_name', 'service_type',
               'source_type', 'desc')

    def __init__(
        self,
        commodity_code: str = None,
        commodity_name: str = None,
        service_type: str = None,
        source_type: str = None,
        desc: str = None,
    ):
        # Commodity code
        self.commodity_code = commodity_code
        # Commodity name
        self.commodity_name = commodity_name
        # Commodity service type
        self.service_type = service_type
        # Commodity source
        self.source_type = source_type
        # Commodity description
        self.desc = desc

    def validate(self):
        """All five fields are mandatory."""
        for name in self._FIELDS:
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a plain dict, omitting fields that are None."""
        result = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a plain dict, ignoring missing/None keys; returns self."""
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ApBill(TeaModel):
    """Placeholder model that carries no fields."""

    def __init__(self):
        # No fields to initialize.
        pass

    def validate(self):
        # Nothing to validate.
        pass

    def to_map(self):
        # No fields, so the serialized map is always empty.
        return dict()

    def from_map(self, m: dict = None):
        # Nothing to read back; accept and ignore the input map.
        return self
class TaxInfo(TeaModel):
def __init__(
self,
taxpayer_name: str = None,
taxpayer_no: str = None,
bank_name: str = None,
bank_account: str = None,
address: str = None,
telephone: str = None,
):
# 纳税人名称
self.taxpayer_name = taxpayer_name
# 纳税人识别号
self.taxpayer_no = taxpayer_no
# 开户行名称
self.bank_name = bank_name
# 开户行账号
self.bank_account = bank_account
# 开户行地址
self.address = address
# 开户行电话
self.telephone = telephone
def validate(self):
| |
# region Description
"""
base.py: Base class for Raw-packet project (base)
Author: <NAME>
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from wpoium.plugins.networking.Utils.vendors import vendors_dictionary
try:
from platform import system, release, linux_distribution
except ImportError:
from platform import system, release
try:
from os import getuid
except ImportError:
from ctypes import windll
from os.path import dirname, abspath, isfile, join
try:
from pwd import getpwuid
except ModuleNotFoundError:
pass
from random import choice, randint
from socket import inet_ntoa
try:
from netifaces import interfaces, ifaddresses, gateways, AF_LINK, AF_INET, AF_INET6
except ModuleNotFoundError:
from socket import AF_INET, AF_INET6
from getmac import get_mac_address
from ifaddr import get_adapters
from netaddr import IPNetwork, IPAddress
from netaddr.core import AddrFormatError
from struct import pack, error
from ipaddress import IPv4Address, AddressValueError
from re import match, compile, search
import subprocess as sub
import psutil as ps
import socket as sock
from distro import linux_distribution
from prettytable import PrettyTable
from typing import Dict, List, Union
from paramiko import RSAKey, SSHClient, AutoAddPolicy, SSHException
from paramiko.ssh_exception import NoValidConnectionsError, AuthenticationException
from colorama import init, Fore, Style
from threading import Lock
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region Main class - Base
class Base:
    """
    Shared utility base class: colored console output helpers, platform and
    privilege checks, and cached network-interface information.
    """
    # region Set variables
    # MAC-vendor lookup table loaded from the bundled vendors dictionary
    vendors: Dict[str, str] = vendors_dictionary
    # Lazily populated cache of installed OS packages (None until first query)
    os_installed_packages_list = None
    # Class-wide lock serializing console output from _color_print()
    _lock: Lock = Lock()
    # Matches Windows-style dash-separated MAC addresses (e.g. '01-23-45-67-89-ab')
    _windows_mac_address_regex = compile(r'([0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2})')
    # ifaddr adapter list, populated in __init__ only on Windows
    _windows_adapters = None
    # Cached human-readable platform string, filled by get_platform()
    _current_platform: Union[None, str] = None
    # Example-shaped caches of per-interface data (keys are interface names)
    _network_interfaces_multicast_macs: Dict[str, List[str]] = \
        {'example-network-interface': ['33:33:00:00:00:02']}
    _network_interfaces_settings: Dict[str, Dict[str, Union[None, bool, int, float, str, List[str]]]] = \
        {'example-network-interface': {
            'network-interface': 'example-network-interface',
            'is-wireless': False,
            'essid': 'AP',
            'bssid': '12:34:56:78:90:ab',
            'channel': 1,
            'frequency': 2.4,
            'mac-address': '12:34:56:78:90:ab',
            'ipv4-address': '192.168.0.1',
            'ipv6-link-address': 'fe80::1234:5678:90ab:cdef',
            'ipv6-global-address': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
            'ipv6-global-addresses': ['fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 'fc00:db20:35b:7399::5'],
            'ipv4-netmask': '255.255.255.0',
            'ipv4-network': '192.168.0.0/24',
            'first-ipv4-address': '192.168.0.1',
            'second-ipv4-address': '192.168.0.2',
            'penultimate-ipv4-address': '192.168.0.253',
            'last-ipv4-address': '192.168.0.254',
            'ipv4-broadcast': '192.168.0.255',
            'ipv4-gateway': '192.168.0.254',
            'ipv6-gateway': 'fe80::1234:5678:8765:4321'
        }}
    # endregion
# region Init
def __init__(self,
admin_only: bool = True,
available_platforms: List[str] = ['Linux', 'Darwin', 'Windows']) -> None:
"""
Init
"""
# Check user is admin/root
if admin_only:
self.check_user()
# Check platform
self.check_platform(available_platforms=available_platforms)
# If current platform is Windows get network interfaces settings
if self.get_platform().startswith('Windows'):
self._windows_adapters = get_adapters()
init(convert=True)
self.cINFO: str = Style.BRIGHT + Fore.BLUE
self.cERROR: str = Style.BRIGHT + Fore.RED
self.cSUCCESS: str = Style.BRIGHT + Fore.GREEN
self.cWARNING: str = Style.BRIGHT + Fore.YELLOW
self.cEND: str = Style.RESET_ALL
self.c_info: str = self.cINFO + '[*]' + self.cEND + ' '
self.c_error: str = self.cERROR + '[-]' + self.cEND + ' '
self.c_success: str = self.cSUCCESS + '[+]' + self.cEND + ' '
self.c_warning: str = self.cWARNING + '[!]' + self.cEND + ' '
self.lowercase_letters: str = 'abcdefghijklmnopqrstuvwxyz'
self.uppercase_letters: str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.digits: str = '0123456789'
# endregion
# region Output functions
    def get_banner(self, script_name: Union[None, str] = None) -> str:
        """
        Get string of colored banner
        :param script_name: Optional script name centered under the banner (default: None)
        :return: String of colored banner
        """
        banner: str = \
            self.cSUCCESS + \
            " _ _ \n" + \
            " _ __ __ ___ __ _ __ __ _ ___| | _____| |_ \n" + \
            "| '__/ _` \ \ /\ / /___ | '_ \ / _` |/ __| |/ / _ \ __|\n" + \
            "| | | (_| |\ V V /|___|| |_) | (_| | (__| < __/ |_ \n" + \
            "|_| \__,_| \_/\_/ | .__/ \__,_|\___|_|\_\___|\__|\n" + \
            " |_| v" + __version__ + "\n" + \
            self.cEND + self.cWARNING + \
            " https://raw-packet.github.io/\r\n" + self.cEND
        if script_name is not None:
            # Center the script name under the ~55-character-wide banner
            banner += '\n' + ' ' * (int((55 - len(script_name)) / 2)) + self.cINFO + script_name + self.cEND + '\n'
        return banner
def print_banner(self, script_name: Union[None, str] = None) -> None:
"""
Print colored banner in console
:return: None
"""
print(self.get_banner(script_name))
def _color_print(self, color: str = 'blue', *strings: str) -> None:
"""
Print colored text in console
:param color: Set color: blue, red, orange, green (default: blue)
:param strings: Strings for printing in console
:return: None
"""
result_output_string: str = ''
if color == 'blue':
result_output_string += self.c_info
elif color == 'red':
result_output_string += self.c_error
elif color == 'orange':
result_output_string += self.c_warning
elif color == 'green':
result_output_string += self.c_success
else:
result_output_string += self.c_info
for index in range(len(strings)):
if index % 2 == 0:
result_output_string += strings[index]
else:
if color == 'blue':
result_output_string += self.cINFO
if color == 'red':
result_output_string += self.cERROR
if color == 'orange':
result_output_string += self.cWARNING
if color == 'green':
result_output_string += self.cSUCCESS
result_output_string += strings[index] + self.cEND
self._lock.acquire()
print(result_output_string)
self._lock.release()
def _color_text(self, color: str = 'blue', string: str = '') -> str:
"""
Make colored string
:param color: Set color: blue, red, orange, green (default: blue)
:param string: Input string (example: 'test')
:return: Colored string (example: '\033[1;34mtest\033[0m')
"""
if color == 'blue':
return self.cINFO + string + self.cEND
elif color == 'red':
return self.cERROR + string + self.cEND
elif color == 'orange':
return self.cWARNING + string + self.cEND
elif color == 'green':
return self.cSUCCESS + string + self.cEND
else:
return self.cINFO + string + self.cEND
def print_info(self, *strings: str) -> None:
"""
Print informational text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('blue', *strings)
def print_error(self, *strings: str) -> None:
"""
Print error text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('red', *strings)
def print_warning(self, *strings: str) -> None:
"""
Print warning text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('orange', *strings)
def print_success(self, *strings: str) -> None:
"""
Print success text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('green', *strings)
def info_text(self, text: str) -> str:
"""
Make information text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;34mtest\033[0m')
"""
return self._color_text('blue', text)
def error_text(self, text: str) -> str:
"""
Make error text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;31mtest\033[0m')
"""
return self._color_text('red', text)
def warning_text(self, text: str) -> str:
"""
Make warning text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;32mtest\033[0m')
"""
return self._color_text('orange', text)
def success_text(self, text: str) -> str:
"""
Make success text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;33mtest\033[0m')
"""
return self._color_text('green', text)
# endregion
# region Check platform and user functions
def get_platform(self) -> str:
"""
Get your platform
:return: Platform string (example: 'Windows 10' or 'Darwin 19.0.0' or 'Linux Ubuntu 18.04')
"""
if self._current_platform is None:
linux_dist = linux_distribution()
try:
assert linux_dist[0] != '' and linux_dist[1] != '' and linux_dist[0] != system()
self._current_platform = str(system()) + ' ' + str(linux_dist[0]) + ' ' + str(linux_dist[1])
except AssertionError:
self._current_platform = str(system()) + ' ' + str(release())
return self._current_platform
def check_platform(self,
available_platforms: List[str] = ['Linux', 'Darwin', 'Windows'],
exit_on_failure: bool = True,
exit_code: int = 1,
quiet: bool = False) -> bool:
"""
Check Python version and OS
:param available_platforms: Available Platforms list (example: ['Linux', 'Darwin', 'Windows'])
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 1)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if OS is Linux or False if not
"""
for available_platform in available_platforms:
if available_platform in self.get_platform():
return True
if not quiet:
print('This script can run only on: ' + ' and '.join(available_platforms))
print('Your platform: ' + self.get_platform() + ' not supported!')
if exit_on_failure:
exit(exit_code)
return False
@staticmethod
def check_user(exit_on_failure: bool = True,
exit_code: int = 2,
quiet: bool = False) -> bool:
"""
Check user privileges
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 2)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if user is root or False if not
"""
try:
if getuid() != 0:
if not quiet:
print('Only root can run this script!')
print('User: ' + str(getpwuid(getuid())[0]) + ' can not run this script!')
if exit_on_failure:
exit(exit_code)
return False
except NameError:
if windll.shell32.IsUserAnAdmin() == 0:
if not quiet:
print('Only Administartor can run this script!')
if exit_on_failure:
exit(exit_code)
return False
return True
# endregion
# region Pack functions
@staticmethod
def pack8(data: Union[int, str, bytes],
| |
import networkx as nx
import csv
import pandas as pd
import itertools
import json
import dedupe
from itertools import combinations,product
import sys
import os
import numpy as np
from affinegap import normalizedAffineGapDistance
import simplejson
from tqdm import tqdm
import tempfile
from dedupe.clustering import cluster as dedupe_cluster
import dm_file_checker
def get_deduper_probs_and_threshold(deduper, unlabeled_data, blocked_data = None, recall_weight = 1):
    """
    Score all candidate record pairs with a trained deduper and choose a
    probability threshold maximizing an F-like recall/precision tradeoff.

    :param deduper: trained dedupe.Deduper instance
    :param unlabeled_data: dedupe-formatted data dict (used when blocked_data is None)
    :param blocked_data: optional pre-blocked data; pairs are then drawn via
        get_blocked_pairs() instead of deduper.pairs()
    :param recall_weight: how much recall is favored over precision
        (acts like beta in an F-measure; 1 weighs them equally)
    :return: (descending array of pair scores, chosen threshold score)
    """
    if blocked_data is None:
        pairs = deduper.pairs(unlabeled_data)
    else:
        pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
    # scoreDuplicates returns a numpy memmap backed by a temp file on disk
    probs = dedupe.core.scoreDuplicates(pairs,
                                        deduper.data_model,
                                        deduper.classifier,
                                        deduper.num_cores)['score']
    # the memory mapped file location of the scored records
    temp_filename = probs.filename
    # copy into RAM before deleting the backing file, then sort descending
    probs = probs.copy()
    probs.sort()
    probs = probs[::-1]
    # delete the memory mapped file so it won't clog the disk
    os.remove(temp_filename)
    # Treating scores as probabilities: cumulative sum at rank k is the
    # expected number of true duplicates among the top-k pairs.
    expected_dupes = np.cumsum(probs)
    recall = expected_dupes / expected_dupes[-1]
    precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
    # F-like score (constant (1 + beta^2) factor omitted; argmax unchanged)
    score = recall * precision / (recall + recall_weight ** 2 * precision)
    i = np.argmax(score)
    print('Maximum expected recall and precision')
    print('recall: {:.2f}%'.format(recall[i]*100))
    print('precision: {:.2f}%'.format(precision[i]*100))
    print('With threshold: {:.2f}%'.format(probs[i]*100))
    return probs, probs[i]
def get_linker_probs_and_threshold(linker, unlabeled_data_1, unlabeled_data_2, blocked_data = None, recall_weight = 1):
    """
    Score all candidate cross-dataset record pairs with a trained linker and
    choose a probability threshold maximizing an F-like recall/precision
    tradeoff.

    :param linker: trained dedupe.RecordLink instance
    :param unlabeled_data_1: dedupe-formatted data dict for the first dataset
    :param unlabeled_data_2: dedupe-formatted data dict for the second dataset
    :param blocked_data: optional pre-blocked data; pairs are then drawn via
        get_blocked_pairs() instead of linker.pairs()
    :param recall_weight: how much recall is favored over precision
        (acts like beta in an F-measure; 1 weighs them equally)
    :return: (descending array of pair scores, chosen threshold score)
    """
    if blocked_data is None:
        pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
    else:
        pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
    # scoreDuplicates returns a numpy memmap backed by a temp file on disk
    probs = dedupe.core.scoreDuplicates(pairs,
                                        linker.data_model,
                                        linker.classifier,
                                        linker.num_cores)['score']
    # the memory mapped file location of the scored records
    temp_filename = probs.filename
    # copy into RAM before deleting the backing file, then sort descending
    probs = probs.copy()
    probs.sort()
    probs = probs[::-1]
    # delete the memory mapped file so it won't clog the disk
    os.remove(temp_filename)
    # Treating scores as probabilities: cumulative sum at rank k is the
    # expected number of true duplicates among the top-k pairs.
    expected_dupes = np.cumsum(probs)
    recall = expected_dupes / expected_dupes[-1]
    precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
    # F-like score (constant (1 + beta^2) factor omitted; argmax unchanged)
    score = recall * precision / (recall + recall_weight ** 2 * precision)
    i = np.argmax(score)
    print('Maximum expected recall and precision')
    print('recall: {:.2f}%'.format(recall[i]*100))
    print('precision: {:.2f}%'.format(precision[i]*100))
    print('With threshold: {:.2f}%'.format(probs[i]*100))
    return probs, probs[i]
def get_model_weights(deduper_or_linker):
    """
    Return each model variable's logistic-regression weight, sorted ascending.

    :param deduper_or_linker: trained dedupe.Deduper or dedupe.RecordLink
    :return: pandas.DataFrame with columns ['variable', 'logistic_reg_weight']
    """
    variable_names = (variable.name for variable in deduper_or_linker.data_model._variables)
    weighted_pairs = zip(variable_names, deduper_or_linker.classifier.weights)
    ranked = sorted(weighted_pairs, key=lambda pair: pair[1])
    return pd.DataFrame(ranked, columns=["variable", "logistic_reg_weight"])
def map_cluster_ids(deduper, unlabeled_data, threshold, hard_threshold = 0.0,
                    blocked_data = None, canonicalize = True, numeric_fields = None,
                    cluster_id_tag = None,
                    mapped_records_filepath = None,
                    cluster_canonical_filepath = None):
    # BADLY NEED TO REFACTOR THIS
    """
    Function that maps record ids to cluster ids
    Parameters
    ----------
    deduper : dedupe.Deduper
        A trained instance of dedupe.
    unlabeled_data : dict
        The dedupe formatted data dictionary.
    threshold : dedupe.Threshold
        The threshold used for clustering.
    hard_threshold: float
        Threshold for record pair scores that will be included in the clustering
    canonicalize : bool or list, default False
        Option that provides the canonical records as additional columns.
        Specifying a list of column names only canonicalizes those columns.
    numeric_fields: list of str, default None
        Specify which fields are numeric
    cluster_id_tag: str, default None
        Additional tag for distinguishing the cluster id of different datasets
    mapped_records_filepath: str, default None
        When given, record/cluster rows are streamed to this CSV instead of
        being accumulated in memory (and `None` is returned for them).
    cluster_canonical_filepath: str, default None
        When given, canonical rows are streamed to this CSV instead of
        being accumulated in memory (and `None` is returned for them).
    Returns
    -------
    mapped_records
        A dataframe storing the mapping from cluster_id to record_id
    cluster_canonicals
        A dataframe storing the canonical representation per cluster_id
    """
    assert (hard_threshold < 1) and (hard_threshold >= 0), "hard_threshold should be less than 1 and at least 0.0"
    # Write CSV headers up front when streaming to disk.
    if mapped_records_filepath is not None:
        with open(mapped_records_filepath, "w", newline = "") as f:
            mapped_records_header = ["record id", "cluster id", "confidence score", "cluster type"]
            writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
            writer.writeheader()
    if canonicalize:
        if cluster_canonical_filepath is not None:
            with open(cluster_canonical_filepath, "w", newline = "") as f:
                cluster_canonical_header = [field.field for field in deduper.data_model.primary_fields]
                cluster_canonical_header.append("cluster id")
                writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
                writer.writeheader()
    else:
        assert cluster_canonical_filepath is None, "can't have canonicalize be False if cluster_canonical_filepath exists"
    # ## Clustering
    if blocked_data is None:
        pairs = deduper.pairs(unlabeled_data)
    else:
        pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
    pair_scores = deduper.score(pairs)
    # only pairs above hard_threshold participate in clustering
    pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
    clustered_dupes = deduper.cluster(pair_scores, threshold)
    if numeric_fields is not None:
        assert isinstance(numeric_fields, list)
    mapped_records = []
    cluster_canonicals = []
    record_ids_in_clusters = []
    # assign cluster ids to record ids
    i = 0
    print("Mapping cluster ids...")
    for cluster in tqdm(clustered_dupes):
        i += 1
        cluster_id = "cl-{}".format(i)
        if cluster_id_tag is not None:
            cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
        id_set, scores = cluster
        if canonicalize:
            cluster_data = [unlabeled_data[i] for i in id_set]
            canonical_rep = get_canonical_rep(cluster_data, numeric_fields = numeric_fields)
            canonical_rep["cluster id"] = cluster_id
            if cluster_canonical_filepath is not None:
                # FIX: open with newline="" like every other csv writer here;
                # without it csv emits blank rows on Windows.
                with open(cluster_canonical_filepath, "a", newline = "") as f:
                    writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
                    writer.writerow(canonical_rep)
            else:
                cluster_canonicals.append(canonical_rep)
        for record_id, score in zip(id_set, scores):
            record_dict = {
                "record id": record_id,
                "cluster id": cluster_id,
                "confidence score": score,
                "cluster type":'dup'
            }
            if mapped_records_filepath is not None:
                # NOTE(review): reopening per row is slow for large outputs;
                # kept as-is to preserve behavior.
                with open(mapped_records_filepath, "a", newline = "") as f:
                    writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
                    writer.writerow(record_dict)
            else:
                mapped_records.append(record_dict)
            record_ids_in_clusters.append(record_id)
    record_ids_in_clusters = set(record_ids_in_clusters)
    solo_ids = list(set(unlabeled_data.keys()).difference(record_ids_in_clusters))
    # assign solo ids to record ids; these are buffered and (if streaming)
    # flushed in one writerows call below
    print("Mapping solo record ids...")
    for record_id in tqdm(solo_ids):
        i += 1
        cluster_id = "cl-{}".format(i)
        if cluster_id_tag is not None:
            cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
        record_dict = {
            "record id":record_id,
            "cluster id":cluster_id,
            "confidence score":None,
            "cluster type":'solo'
        }
        mapped_records.append(record_dict)
    if mapped_records_filepath is None:
        mapped_records = pd.DataFrame(mapped_records)
    else:
        with open(mapped_records_filepath, "a", newline = "") as f:
            writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
            writer.writerows(mapped_records)
        mapped_records = None
    if cluster_canonical_filepath is None:
        cluster_canonicals = pd.DataFrame(cluster_canonicals)
    else:
        cluster_canonicals = None
    # delete temporary file generated for pair_scores (only exists when
    # dedupe backed the scores with a memmap; AttributeError otherwise)
    try:
        mmap_file = pair_scores.filename
        del pair_scores
        os.remove(mmap_file)
    except AttributeError:
        pass
    if canonicalize:
        return mapped_records, cluster_canonicals
    else:
        return mapped_records
def abs_distance(x, y):
    """Absolute difference between ``x`` and ``y`` (element-wise for arrays)."""
    return np.absolute(x - y)
def get_canonical_rep(record_cluster, numeric_fields = None):
    """
    Given a list of records within a duplicate cluster, constructs a
    canonical representation of the cluster by finding canonical
    values for each field
    """
    if numeric_fields is None:
        numeric_fields = []
    canonical_rep = {}
    # fields are taken from the first record; all records in a cluster
    # share the same keys
    for field in record_cluster[0].keys():
        # numeric fields use absolute distance, text fields use the
        # affine gap string distance
        comparator = abs_distance if field in numeric_fields else normalizedAffineGapDistance
        # assume non-empty values always better than empty value
        # for canonical record
        values = [record[field] for record in record_cluster if record[field]]
        if values:
            canonical_rep[field] = dedupe.canonical.getCentroid(values, comparator)
        else:
            canonical_rep[field] = ''
    return canonical_rep
def get_linked_ids(linker, unlabeled_data_1, unlabeled_data_2, threshold, hard_threshold = 0.0, blocked_data = None,
mapped_records_filepath = None, constraint = "one-to-one"):
# BADLY NEED TO REFACTOR THIS
"""
constraint: What type of constraint to put on a join.
'one-to-one'
Every record in data_1 can match at most
one record from data_2 and every record
from data_2 can match at most one record
from data_1. This is good for when both
data_1 and data_2 are from different
sources and you are interested in
matching across the sources. If,
individually, data_1 or data_2 have many
duplicates you will not get good
matches.
'many-to-one'
Every record in data_1 can match at most
one record from data_2, but more than
one record from data_1 can match to the
same record in data_2. This is good for
when data_2 is a lookup table and data_1
is messy, such as geocoding or matching
against golden records.
'many-to-many'
Every record in data_1 can match
multiple records in data_2 and vice
versa. This is like a SQL inner join.
"""
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "w", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
## link matching
if blocked_data is None:
pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
pair_scores = linker.score(pairs)
pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
assert constraint in {'one-to-one', 'many-to-one', 'many-to-many'}, (
'%s is an invalid constraint option. Valid options include '
'one-to-one, many-to-one, or many-to-many' % constraint)
if constraint == 'one-to-one':
links = linker.one_to_one(pair_scores, threshold)
elif constraint == 'many-to-one':
links = linker.many_to_one(pair_scores, threshold)
elif constraint == 'many-to-many':
links = pair_scores[pair_scores['score'] > threshold]
links = list(links)
# delete temporary file generated for pair_scores
try:
mmap_file = pair_scores.filename
del pair_scores
os.remove(mmap_file)
except AttributeError:
pass
mapped_records = []
ids_with_links_1 = []
ids_with_links_2 = []
print("Mapping linked pairs...")
for record_pair in tqdm(links):
record_ids, score = record_pair
pair_dict = {
"record id 1":record_ids[0],
"record id 2":record_ids[1],
"confidence score":score,
"link type":"dup",
}
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "a", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
| |
TimelineCategory.MissionSelected,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"02:00.0",
"seduce target selected.",
(None,),
(2 * 60 + 00.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"02:00.0",
"bug ambassador enabled.",
(None,),
(2 * 60 + 00.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:00.0",
"contact double agent enabled.",
(None,),
(2 * 60 + 00.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"02:00.0",
"seduce target enabled.",
(None,),
(2 * 60 + 00.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Seduce,
ActionTest.NoAT,
),
(
"game",
"02:00.0",
"game started.",
(None,),
(2 * 60 + 00.0),
None,
(None,),
(None,),
TimelineCategory.GameStart,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:58.8",
"spy player takes control from ai.",
(None,),
(1 * 60 + 58.8),
None,
(None,),
(None,),
TimelineCategory.NoCategory,
Missions.NoMission,
ActionTest.NoAT,
),
(
"sniper",
"01:58.7",
"marked suspicious.",
(Characters.Taft,),
(1 * 60 + 58.7),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.SniperLights,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:58.7",
"spy leaves conversation.",
(None,),
(1 * 60 + 58.7),
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"sniper",
"01:56.3",
"marked spy suspicious.",
(Characters.Smallman,),
(1 * 60 + 56.3),
None,
(Roles.Spy,),
(None,),
TimelineCategory.SniperLights,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:56.1",
"spy enters conversation.",
(None,),
(1 * 60 + 56.1),
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:56.1",
"spy joined conversation with double agent.",
(Characters.Irish,),
(1 * 60 + 56.1),
None,
(Roles.DoubleAgent,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:49.7",
"action triggered: contact double agent",
(None,),
(1 * 60 + 49.7),
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"01:49.7",
"real banana bread started.",
(None,),
(1 * 60 + 49.7),
None,
(None,),
(None,),
TimelineCategory.BananaBread,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"01:48.8",
"action test green: contact double agent",
(None,),
(1 * 60 + 48.8),
None,
(None,),
(None,),
TimelineCategory.ActionTest,
Missions.Contact,
ActionTest.Green,
),
(
"spy",
"01:48.8",
"banana bread uttered.",
(None,),
(1 * 60 + 48.8),
None,
(None,),
(None,),
TimelineCategory.BananaBread,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"01:48.3",
"double agent contacted.",
(Characters.Irish,),
(1 * 60 + 48.3),
None,
(Roles.DoubleAgent,),
(None,),
TimelineCategory.MissionComplete,
Missions.Contact,
ActionTest.NoAT,
),
(
"sniper",
"01:44.1",
"took shot.",
(Characters.Smallman,),
(1 * 60 + 44.1),
None,
(Roles.Spy,),
(None,),
TimelineCategory.SniperShot,
Missions.NoMission,
ActionTest.NoAT,
),
(
"game",
"01:40.9",
"sniper shot spy.",
(Characters.Smallman,),
(1 * 60 + 40.9),
None,
(Roles.Spy,),
(None,),
TimelineCategory.GameEnd,
Missions.NoMission,
ActionTest.NoAT,
),
],
),
(
"clock_remaining",
[
(
"spy",
"01:51.5",
"begin planting bug while standing.",
(Characters.Wheels,),
(1 * 60 + 51.5),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.NoCategory,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"01:49.8",
"bugged ambassador while standing.",
(Characters.Wheels,),
(1 * 60 + 49.8),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.MissionComplete,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"01:48.9",
"action triggered: seduce target",
(None,),
(1 * 60 + 48.9),
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"01:48.9",
"begin flirtation with seduction target.",
(Characters.Oprah,),
(1 * 60 + 48.9),
None,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.NoCategory,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"01:48.2",
"action test white: seduce target",
(None,),
(1 * 60 + 48.2),
None,
(None,),
(None,),
TimelineCategory.ActionTest,
Missions.Seduce,
ActionTest.White,
),
(
"spy",
"01:48.2",
"flirt with seduction target: 59%",
(Characters.Oprah,),
(1 * 60 + 48.2),
None,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.MissionPartial,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"01:35.8",
"spy leaves conversation.",
(None,),
95.8,
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:35.8",
"spy left conversation with double agent.",
(Characters.Salmon,),
95.8,
None,
(Roles.DoubleAgent,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:30.6",
"spy enters conversation.",
(None,),
90.6,
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"01:29.0",
"flirtation cooldown expired.",
(None,),
89.0,
None,
(None,),
(None,),
TimelineCategory.NoCategory,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"01:03.3",
"spy leaves conversation.",
(None,),
63.3,
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:57.6",
"spy enters conversation.",
(None,),
57.6,
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:57.0",
"action triggered: seduce target",
(None,),
57.0,
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"00:57.0",
"begin flirtation with seduction target.",
(Characters.Oprah,),
57.0,
None,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.NoCategory,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"00:55.9",
"action test green: seduce target",
(None,),
55.9,
None,
(None,),
(None,),
TimelineCategory.ActionTest,
Missions.Seduce,
ActionTest.Green,
),
(
"spy",
"00:55.9",
"flirt with seduction target: 100%",
(Characters.Oprah,),
55.9,
None,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.MissionPartial,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"00:55.9",
"target seduced.",
(Characters.Oprah,),
55.9,
None,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.MissionComplete,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"00:43.9",
"spy leaves conversation.",
(None,),
43.9,
None,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:39.3",
"picked up statue.",
(None,),
39.3,
None,
(None,),
(None,),
TimelineCategory.Statues,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:36.1",
"action triggered: swap statue",
(None,),
36.1,
None,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.ActionTriggered,
Missions.Swap,
ActionTest.NoAT,
),
(
"spy",
"00:35.5",
"action test green: swap statue",
(None,),
35.5,
None,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.ActionTest,
Missions.Swap,
ActionTest.Green,
),
(
"spy",
"00:35.5",
"statue swap pending.",
(None,),
35.5,
None,
(None,),
(None,),
TimelineCategory.Statues,
Missions.Swap,
ActionTest.NoAT,
),
(
"game",
"00:35.5",
"missions completed. countdown pending.",
(None,),
35.5,
None,
(None,),
(None,),
TimelineCategory.MissionCountdown,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:34.5",
"put back statue.",
(None,),
34.5,
None,
(None,),
(None,),
TimelineCategory.Statues,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:21.7",
"action triggered: check watch",
(None,),
21.7,
None,
(None,),
(None,),
TimelineCategory.Watch | TimelineCategory.ActionTriggered,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:21.7",
"watch checked.",
(Characters.Smallman,),
21.7,
None,
(Roles.Spy,),
(None,),
TimelineCategory.Watch,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"00:16.3",
"character picked up pending statue.",
(Characters.Bling,),
16.3,
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Statues,
Missions.Swap,
ActionTest.NoAT,
),
(
"spy",
"00:13.9",
"statue swapped.",
(Characters.Bling,),
13.9,
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Statues | TimelineCategory.MissionComplete,
Missions.Swap,
ActionTest.NoAT,
),
(
"game",
"00:13.9",
"missions completed. 10 second countdown.",
(None,),
13.9,
None,
(None,),
(None,),
TimelineCategory.MissionCountdown,
Missions.NoMission,
ActionTest.NoAT,
),
(
"game",
"00:03.9",
"missions completed successfully.",
(None,),
3.9,
None,
(None,),
(None,),
TimelineCategory.GameEnd,
Missions.NoMission,
ActionTest.NoAT,
),
],
),
(
"clock_elapsed",
[
(
"spy",
"128.50",
"begin planting bug while standing.",
(Characters.Wheels,),
None,
128.50,
(Roles.Ambassador,),
(None,),
TimelineCategory.NoCategory,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"130.13",
"bugged ambassador while standing.",
(Characters.Wheels,),
None,
130.13,
(Roles.Ambassador,),
(None,),
TimelineCategory.MissionComplete,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"131.06",
"action triggered: seduce target",
(None,),
None,
131.06,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"131.06",
"begin flirtation with seduction target.",
(Characters.Oprah,),
None,
131.06,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.NoCategory,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"131.75",
"action test white: seduce target",
(None,),
None,
131.75,
(None,),
(None,),
TimelineCategory.ActionTest,
Missions.Seduce,
ActionTest.White,
),
(
"spy",
"131.75",
"flirt with seduction target: 59%",
(Characters.Oprah,),
None,
131.75,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.MissionPartial,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"144.19",
"spy leaves conversation.",
(None,),
None,
144.19,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"144.19",
"spy left conversation with double agent.",
(Characters.Salmon,),
None,
144.19,
(Roles.DoubleAgent,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"149.31",
"spy enters conversation.",
(None,),
None,
149.31,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"150.94",
"flirtation cooldown expired.",
(None,),
None,
150.94,
(None,),
(None,),
TimelineCategory.NoCategory,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"176.63",
"spy leaves conversation.",
(None,),
None,
176.63,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"182.38",
"spy enters conversation.",
(None,),
None,
182.38,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"183.00",
"action triggered: seduce target",
(None,),
None,
183.0,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"183.00",
"begin flirtation with seduction target.",
(Characters.Oprah,),
None,
183.0,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.NoCategory,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"184.06",
"action test green: seduce target",
(None,),
None,
184.06,
(None,),
(None,),
TimelineCategory.ActionTest,
Missions.Seduce,
ActionTest.Green,
),
(
"spy",
"184.06",
"flirt with seduction target: 100%",
(Characters.Oprah,),
None,
184.06,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.MissionPartial,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"184.06",
"target seduced.",
(Characters.Oprah,),
None,
184.06,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.MissionComplete,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"196.06",
"spy leaves conversation.",
(None,),
None,
196.06,
(None,),
(None,),
TimelineCategory.Conversation,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"200.69",
"picked up statue.",
(None,),
None,
200.69,
(None,),
(None,),
TimelineCategory.Statues,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"203.81",
"action triggered: swap statue",
(None,),
None,
203.81,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.ActionTriggered,
Missions.Swap,
ActionTest.NoAT,
),
(
"spy",
"204.50",
"action test green: swap statue",
(None,),
None,
204.50,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.ActionTest,
Missions.Swap,
ActionTest.Green,
),
(
"spy",
"204.50",
"statue swap pending.",
(None,),
None,
204.50,
(None,),
(None,),
TimelineCategory.Statues,
Missions.Swap,
ActionTest.NoAT,
),
(
"game",
"204.50",
"missions completed. countdown pending.",
(None,),
None,
204.50,
(None,),
(None,),
TimelineCategory.MissionCountdown,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"205.50",
"put back statue.",
(None,),
None,
205.50,
(None,),
(None,),
TimelineCategory.Statues,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"218.25",
"action triggered: check watch",
(None,),
None,
218.25,
(None,),
(None,),
TimelineCategory.Watch | TimelineCategory.ActionTriggered,
Missions.NoMission,
ActionTest.NoAT,
),
(
| |
#
# Copyright (c) 2017-2022 by QA Cafe.
# All Rights Reserved.
#
from builtins import input
import getpass
import io
import os
import re
import requests
from threading import Lock
from requests_toolbelt.downloadutils import stream
from requests_toolbelt import sessions
from requests_toolbelt.utils.user_agent import user_agent
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.exceptions import HTTPError
from marshmallow import Schema, fields, post_load
from . import __version__
from .cdr_error import CDRouterError
from .cdr_datetime import DateTime
from .alerts import AlertsService
from .configs import ConfigsService
from .devices import DevicesService
from .attachments import AttachmentsService
from .jobs import JobsService
from .packages import PackagesService
from .results import ResultsService
from .testresults import TestResultsService
from .annotations import AnnotationsService
from .captures import CapturesService
from .highlights import HighlightsService
from .imports import ImportsService
from .exports import ExportsService
from .history import HistoryService
from .system import SystemService
from .tags import TagsService
from .testsuites import TestsuitesService
from .users import UsersService, UserSchema
class Links(object):
    """Class representing paging information returned by ``list`` calls to the CDRouter Web API.

    :param first: (optional) First page number as an int.
    :param last: (optional) Last page number as an int.
    :param current: (optional) Current page number as an int.
    :param total: (optional) Total element count across all pages as an int.
    :param limit: (optional) Resources per page limit as an int.
    :param next: (optional) Next page number as an int.
    :param prev: (optional) Previous page number as an int.
    """
    def __init__(self, **kwargs):
        # every attribute is optional and defaults to None
        for name in ('first', 'last', 'current', 'total', 'limit', 'next', 'prev'):
            setattr(self, name, kwargs.get(name))
class LinksSchema(Schema):
    # Marshmallow schema that deserializes the paging "links" object of a
    # list response into a Links instance.
    first = fields.Int()
    last = fields.Int()
    current = fields.Int()
    total = fields.Int()
    limit = fields.Int()
    next = fields.Int(missing=None)  # defaults to None when absent from the payload
    prev = fields.Int(missing=None)  # defaults to None when absent from the payload
    @post_load
    def post_load(self, data):
        # Convert the validated dict into the Links model object.
        return Links(**data)
class Response(object):
    """Envelope for a decoded CDRouter API response.

    Holds the response ``timestamp``, an ``error`` string (if any), the
    ``data`` payload, and paging ``links`` — each ``None`` when not supplied.
    """
    def __init__(self, **kwargs):
        for name in ('timestamp', 'error', 'data', 'links'):
            setattr(self, name, kwargs.get(name))
class ResponseSchema(Schema):
    # Marshmallow schema for a single-resource API response envelope.
    timestamp = DateTime()
    error = fields.Str(missing=None)   # defaults to None when absent from the payload
    data = fields.Dict(missing=None)   # single resource as a dict
    @post_load
    def post_load(self, data):
        # Wrap the validated fields in a Response object.
        return Response(**data)
class ListResponseSchema(ResponseSchema):
    # Variant of ResponseSchema for list endpoints: `data` is a list of
    # dicts and paging information is deserialized into `links`.
    data = fields.List(fields.Dict(), missing=None)
    links = fields.Nested(LinksSchema, missing=None)
    @post_load
    def post_load(self, data):
        # Still produces a Response; `links` carries the paging info.
        return Response(**data)
class Share(object):
    """Model for CDRouter Shares.

    :param user_id: (optional) User ID as an int.
    :param read: (optional) Bool `True` if reading is allowed.
    :param write: (optional) Bool `True` if writing is allowed.
    :param execute: (optional) Bool `True` if executing is allowed.
    """
    def __init__(self, **kwargs):
        # all attributes optional, defaulting to None
        for name in ('user_id', 'read', 'write', 'execute'):
            setattr(self, name, kwargs.get(name))
class ShareSchema(Schema):
user_id = fields.Int(as_string=True)
read = fields.Bool()
write = fields.Bool()
execute = fields.Bool()
@post_load
def post_load(self, data):
return Share(**data)
class Auth(requests.auth.AuthBase): # pylint: disable=too-few-public-methods
    """Class for authorizing CDRouter Web API requests.

    Attaches a ``Bearer`` token to every outgoing request, triggering
    interactive authentication once if no token is available and the
    server demands one.
    """
    def __init__(self, c):
        # c is the owning CDRouter client; its lock guards c.token
        self.c = c
    def __call__(self, r):
        # never attach/refresh a token for the authenticate call itself
        if r.method == 'POST' and r.path_url.startswith('/authenticate'):
            return r
        # FIX: use the lock as a context manager so it is released even if
        # an exception is raised (bare acquire/release leaked on error)
        with self.c.lock:
            token = self.c.token
        if token is None:
            # if API request with no token returns a 401, automatic
            # login is disabled and user needs to authenticate
            resp = requests.get(self.c.base + self.c.BASE + 'system/hostname/', verify=(not self.c.insecure))
            if resp.status_code == 401:
                self.c.authenticate(self.c.retries)
                with self.c.lock:
                    token = self.c.token
        if token is not None:
            r.headers['authorization'] = 'Bearer ' + token
        return r
def _getuser_default(base):
    """Prompt on stdin for the username to use on *base*."""
    prompt = 'username on {}: '.format(base)
    return input(prompt)
def _getpass_default(base, username):
    """Prompt (without echo) for *username*'s password on *base*."""
    prompt = '{}\'s password on {}: '.format(username, base)
    return getpass.getpass(prompt)
class CDRouter(object):
"""Service for accessing the CDRouter Web API.
:param base: Base HTTP or HTTPS URL for CDRouter system as a
string, optionally including a port. For example
`http://localhost`, `http://cdrouter.lan:8015` or
`https://127.0.0.1`.
:param token: (optional) CDRouter API token as a string. Not
required if Automatic Login is enabled. If omitted, value
will be taken from CDROUTER_API_TOKEN environment variable.
:param username: (optional) Username as string. Can be omitted if
``token`` is specified or Automatic Login is enabled. If
omitted, ``_getuser`` will be called when a username is
required.
:param password: (optional) Password as string. Can be omitted if
``token`` is specified or Automatic Login is enabled. If
omitted, ``_getpass`` will be called when a password is
required.
:param _getuser: (optional) If username is `None`, function to be
called as ``_getuser(base)`` which returns a username as a
string. If ``_getuser`` is `None`, ``cdrouter`` will print a
prompt to stdout and read the username from stdin.
:param _getpass: (optional) If password is `None`, a function to
be called as ``_getpass(base, username)`` which returns user's
password as a string. If ``_getpass`` is `None`, ``cdrouter``
will print a password prompt to stdout and read the password
from stdin.
:param retries: (optional) The number of times to retry authentication
with the CDRouter system before giving up as an int.
:param insecure: (optional) If bool `True` and `base` is an HTTPS
URL, skip certificate verification and allow insecure
connections to the CDRouter system.
"""
BASE = '/api/v1/'
def __init__(self, base, token=None, username=None, password=None, _getuser=_getuser_default, _getpass=_getpass_default, retries=3, insecure=False):
    # FIX: the `password` default was a redacted `<PASSWORD>` placeholder
    # (a SyntaxError); restored to None per the class docstring — when
    # omitted, `_getpass` is called lazily during authentication.
    self.lock = Lock()
    self.base = base.rstrip('/')
    # fall back to the CDROUTER_API_TOKEN environment variable
    self.token = token or os.environ.get('CDROUTER_API_TOKEN')
    self.username = username
    self.password = password
    self._getuser = _getuser
    self._getpass = _getpass
    self.retries = retries
    self.insecure = insecure
    if insecure:
        # disable annoying InsecureRequestWarning
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    # all request paths are resolved relative to <base>/api/v1/
    self.session = sessions.BaseUrlSession(base_url=self.base+self.BASE)
    #: :class:`alerts.AlertsService <alerts.AlertsService>` object
    self.alerts = AlertsService(self)
    #: :class:`configs.ConfigsService <configs.ConfigsService>` object
    self.configs = ConfigsService(self)
    #: :class:`devices.DevicesService <devices.DevicesService>` object
    self.devices = DevicesService(self)
    #: :class:`attachments.AttachmentsService <attachments.AttachmentsService>` object
    self.attachments = AttachmentsService(self)
    #: :class:`jobs.JobsService <jobs.JobsService>` object
    self.jobs = JobsService(self)
    #: :class:`packages.PackagesService <packages.PackagesService>` object
    self.packages = PackagesService(self)
    #: :class:`results.ResultsService <results.ResultsService>` object
    self.results = ResultsService(self)
    #: :class:`testresults.TestResultsService <testresults.TestResultsService>` object
    self.tests = TestResultsService(self)
    #: :class:`annotations.AnnotationsService <annotations.AnnotationsService>` object
    self.annotations = AnnotationsService(self)
    #: :class:`captures.CapturesService <captures.CapturesService>` object
    self.captures = CapturesService(self)
    #: :class:`highlights.HighlightsService <highlights.HighlightsService>` object
    self.highlights = HighlightsService(self)
    #: :class:`imports.ImportsService <imports.ImportsService>` object
    self.imports = ImportsService(self)
    #: :class:`exports.ExportsService <exports.ExportsService>` object
    self.exports = ExportsService(self)
    #: :class:`history.HistoryService <history.HistoryService>` object
    self.history = HistoryService(self)
    #: :class:`system.SystemService <system.SystemService>` object
    self.system = SystemService(self)
    #: :class:`tags.TagsService <tags.TagsService>` object
    self.tags = TagsService(self)
    #: :class:`testsuites.TestsuitesService <testsuites.TestsuitesService>` object
    self.testsuites = TestsuitesService(self)
    #: :class:`users.UsersService <users.UsersService>` object
    self.users = UsersService(self)
# base request methods
def _req(self, path, method='GET', json=None, data=None, params=None, headers=None, files=None, stream=None):
    """Perform one HTTP request against the CDRouter API and return the response.

    Adds the cdrouter.py user-agent header, attaches token auth via
    :class:`Auth`, and raises on error statuses via ``raise_for_status``.
    """
    params = {} if params is None else params
    headers = {} if headers is None else headers
    files = {} if files is None else files
    headers.update({'user-agent': user_agent('cdrouter.py', __version__)})
    resp = self.session.request(
        method, path, params=params, headers=headers, files=files, stream=stream,
        json=json, data=data, verify=(not self.insecure), auth=Auth(c=self))
    self.raise_for_status(resp)
    return resp
def get(self, path, params=None, stream=None):
    # HTTP GET against an API path.
    return self._req(path, method='GET', params=params, stream=stream)
def post(self, path, json=None, data=None, params=None, files=None, stream=None):
    # HTTP POST; `json` is sent as a JSON body, `data` as form/raw data.
    return self._req(path, method='POST', json=json, data=data, params=params, stream=stream, files=files)
def patch(self, path, json, params=None):
    # HTTP PATCH with a JSON body (used for edits).
    return self._req(path, method='PATCH', json=json, params=params)
def delete(self, path, params=None):
    # HTTP DELETE.
    return self._req(path, method='DELETE', params=params)
# cdrouter-specific request methods
def list(self, base, filter=None, type=None, sort=None, limit=None, page=None, format=None, detailed=None): # pylint: disable=redefined-builtin
    """GET one page of a list endpoint with the standard query parameters.

    ``sort`` may be a single field name or a list of them; lists are
    joined with commas. ``detailed`` is coerced to a bool when given.
    """
    # FIX: use `is not None` instead of `!= None` (PEP 8 identity check)
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
        sort = ','.join(sort)
    if detailed is not None:
        detailed = bool(detailed)
    return self.get(base, params={'filter': filter, 'type': type, 'sort': sort, 'limit': limit,
                                  'page': page, 'format': format, 'detailed': detailed})
def iter_list(self, list_fn, *args, **kwargs):
    """Generator yielding every element across all pages of a ``list`` call.

    ``list_fn`` must return ``(data, links)``; iteration follows
    ``links.next`` until it is None.
    """
    while True:
        data, links = list_fn(*args, **kwargs)
        yield from data
        if links.next is None:
            return
        # advance to the next page on the following call
        kwargs['page'] = links.next
def get_id(self, base, id, params=None, stream=None): # pylint: disable=invalid-name,redefined-builtin
    # GET a single resource at <base><id>/.
    return self.get(base+str(id)+'/', params=params, stream=stream)
def create(self, base, resource):
    # POST a new resource to a collection endpoint.
    return self.post(base, json=resource)
def edit(self, base, id, resource): # pylint: disable=invalid-name,redefined-builtin
    # PATCH an existing resource by id.
    return self.patch(base+str(id)+'/', json=resource)
def delete_id(self, base, id): # pylint: disable=invalid-name,redefined-builtin
    # DELETE a resource by id.
    return self.delete(base+str(id)+'/')
def get_shares(self, base, id): # pylint: disable=invalid-name,redefined-builtin
    """GET the user shares of a resource; returns decoded Share objects."""
    schema = ShareSchema()
    resp = self.get(base+str(id)+'/shares/')
    return self.decode(schema, resp, many=True)
def edit_shares(self, base, id, user_ids): # pylint: disable=invalid-name,redefined-builtin
    """PATCH a resource's share list to exactly ``user_ids`` (coerced to ints)."""
    schema = ShareSchema()
    resp = self.patch(base+str(id)+'/shares/', json={'user_ids': list(map(int, user_ids))})
    return self.decode(schema, resp, many=True)
def filename(self, resp, filename=None):
    """Extract the filename from a response's Content-Disposition header.

    Falls back to the supplied ``filename`` when the header is absent or
    carries no ``filename="..."`` component.
    """
    if 'content-disposition' in resp.headers:
        match = re.search('filename="([^"]+)"', resp.headers['content-disposition'])
        if match is not None:
            return match.group(1)
    return filename
def export(self, base, id, format='gz', params=None): # pylint: disable=invalid-name,redefined-builtin
    """Download a single resource as an archive.

    Returns ``(BytesIO, filename)`` with the buffer rewound to the start.
    Note: a caller-supplied ``params`` dict is updated in place (preserved
    from the original contract).
    """
    if params is None:
        params = {}
    params['format'] = format
    resp = self.get(base + str(id) + '/', params=params, stream=True)
    buf = io.BytesIO()
    stream.stream_response_to_file(resp, path=buf)
    resp.close()
    buf.seek(0)
    return (buf, self.filename(resp))
def bulk_export(self, base, ids, params=None):
    """Download several resources in one archive.

    Returns ``(BytesIO, filename)`` with the buffer rewound to the start.
    Note: a caller-supplied ``params`` dict is updated in place.
    """
    if params is None:
        params = {}
    params['bulk'] = 'export'
    params['ids'] = ','.join(str(i) for i in ids)
    resp = self.get(base, params=params, stream=True)
    buf = io.BytesIO()
    stream.stream_response_to_file(resp, path=buf)
    resp.close()
    buf.seek(0)
    return (buf, self.filename(resp))
def bulk_copy(self, base, resource, ids, schema):
    """Copy multiple resources in one bulk request; returns decoded copies."""
    body = {resource: [{'id': str(each)} for each in ids]}
    resp = self.post(base, params={'bulk': 'copy'}, json=body)
    return self.decode(schema, resp, many=True)
def bulk_edit(self, base, resource, fields, ids=None, filter=None, type=None, all=False, testvars=None): # pylint: disable=redefined-builtin
json = {'fields': fields}
if ids != None or | |
use when sending faxes from the PC."),
self.user_settings.cmd_fab),
# SETTINGS/TOOLS
(lambda : d.power_settings != POWER_SETTINGS_NONE and avail,
self.__tr("Device Settings"),
"settings",
self.__tr("Your device has special device settings.<br>You may alter these settings here."),
lambda : DeviceSetupDialog(self, self.cur_device_uri)),
(lambda : printer,
self.__tr("Print Test Page"),
"testpage",
self.__tr("Print a test page to test the setup of your printer."),
lambda : PrintTestPageDialog(self, self.cur_printer)),
(lambda : True,
self.__tr("View Printer and Device Information"),
"cups",
self.__tr("View information about the device and all its CUPS queues."),
lambda : InfoDialog(self, self.cur_device_uri)),
(lambda: printer and d.align_type != ALIGN_TYPE_NONE,
self.__tr("Align Cartridges (Print Heads)"),
"align",
self.__tr("This will improve the quality of output when a new cartridge is installed."),
lambda : AlignDialog(self, self.cur_device_uri)),
(lambda: printer and d.clean_type != CLEAN_TYPE_NONE,
self.__tr("Clean Printheads"),
"clean",
self.__tr("You only need to perform this action if you are<br>having problems with poor printout quality due to clogged ink nozzles."),
lambda : CleanDialog(self, self.cur_device_uri)),
(lambda: printer and d.color_cal_type != COLOR_CAL_TYPE_NONE and d.color_cal_type == COLOR_CAL_TYPE_TYPHOON,
self.__tr("Color Calibration"),
"colorcal",
self.__tr("Use this procedure to optimimize your printer's color output<br>(requires glossy photo paper)."),
lambda : ColorCalDialog(self, self.cur_device_uri)),
(lambda: printer and d.color_cal_type != COLOR_CAL_TYPE_NONE and d.color_cal_type != COLOR_CAL_TYPE_TYPHOON,
self.__tr("Color Calibration"),
"colorcal",
self.__tr("Use this procedure to optimimize your printer's color output."),
lambda : ColorCalDialog(self, self.cur_device_uri)),
(lambda: printer and d.linefeed_cal_type != LINEFEED_CAL_TYPE_NONE,
self.__tr("Line Feed Calibration"),
"linefeed_cal",
self.__tr("Use line feed calibration to optimize print quality<br>(to remove gaps in the printed output)."),
lambda : LineFeedCalDialog(self, self.cur_device_uri)),
(lambda: printer and d.pq_diag_type != PQ_DIAG_TYPE_NONE,
self.__tr("Print Diagnostic Page"),
"pq_diag",
self.__tr("Your printer can print a test page <br>to help diagnose print quality problems."),
lambda : PQDiagDialog(self, self.cur_device_uri)),
(lambda: printer and d.wifi_config >= WIFI_CONFIG_USB_XML and bus == 'usb',
self.__tr("Wireless/wifi setup using USB"),
"wireless",
self.__tr("Configure your wireless capable printer using a temporary USB connection."),
'hp-wificonfig -d %s' % self.cur_device_uri),
# FIRMWARE
(lambda : printer and d.fw_download ,
self.__tr("Download Firmware"),
"firmware",
self.__tr("Download firmware to your printer <br>(required on some devices after each power-up)."),
lambda : FirmwareDialog(self, self.cur_device_uri)),
# PLUGIN
(lambda : printer and req_plugin,
self.__tr("Install Required Plugin"),
"plugin",
x,
lambda : PluginInstall(self, d.plugin, plugin_installed)),
(lambda : printer and opt_plugin,
self.__tr("Install Optional Plugin"),
"plugin",
x,
lambda : PluginInstall(self, d.plugin, plugin_installed)),
# EWS
(lambda : printer and d.embedded_server_type > EWS_NONE and bus == 'net',
self.__tr("Open printer's web page in a browser"),
"ews",
self.__tr("The printer's web page has supply, status, and other information."),
openEWS(host, zc)),
# HELP/WEBSITE
(lambda : True,
self.__tr("Visit HPLIP Support Website"),
"hp_logo",
self.__tr("Visit HPLIP Support Website."),
self.support),
(lambda : True,
self.__tr("Help"),
"help",
self.__tr("View HPLIP help."),
self.docs),
]
if not self.func_icons_cached:
for filte, text, icon, tooltip, cmd in self.ICONS:
self.func_icons[icon] = load_pixmap(icon, '32x32')
self.func_icons_cached = True
for fltr, text, icon, tooltip, cmd in self.ICONS:
if fltr is not None:
if not fltr():
continue
FuncViewItem(self.ActionsList, text,
self.func_icons[icon],
tooltip,
cmd)
finally:
endWaitCursor()
def ActionsList_clicked(self, item):
    """Dispatch the action bound to a clicked ActionsList item.

    ``item.cmd`` is either a callable that returns a dialog, or a string
    holding a URL or an external command line.  ``self.click_lock`` plus a
    1-second timer prevents double activation of string commands.
    """
    if item is not None and self.click_lock is not item:
        self.click_lock = item
        # Fix: use the callable() builtin — collections.Callable was
        # removed in Python 3.10 (moved to collections.abc.Callable).
        if item.cmd and callable(item.cmd):
            dlg = item.cmd()
            # pause device polling while a modal dialog is up
            self.sendMessage('', '', EVENT_DEVICE_STOP_POLLING)
            try:
                dlg.exec_()
            finally:
                self.sendMessage('', '', EVENT_DEVICE_START_POLLING)
        else:
            beginWaitCursor()
            if item.cmd.split(':')[0] in ('http', 'https', 'file'):
                log.debug("Opening browser to: %s" % item.cmd)
                utils.openURL(item.cmd)
            else:
                self.runExternalCommand(str(item.cmd))
            # unlockClick releases click_lock and restores the cursor
            QTimer.singleShot(1000, self.unlockClick)
def unlockClick(self):
    # Timer callback (see ActionsList_clicked): release the click lock so the
    # item can be activated again, then restore the normal cursor.
    self.click_lock = None
    endWaitCursor()
def ActionsList_customContextMenuRequested(self, p):
    # NOTE(review): looks like leftover debug output — this only prints the
    # requested menu position; no context menu is actually built or shown.
    print(p)
    #pass
# ***********************************************************************************
#
# STATUS TAB
#
# ***********************************************************************************
def initStatusTab(self):
    """One-time setup of the status tab: empty the table and define headers."""
    self.StatusTable.setColumnCount(0)
    # Column order must match the (col, value) fills in updateStatusTable.
    self.status_headers = [self.__tr(""), self.__tr("Status"), self.__tr("Date and Time"),
                           self.__tr("Code"), self.__tr("Job ID"), self.__tr("Description")]
def updateStatusTab(self):
    """Refresh the status tab: the simulated LCD and the event-history table."""
    self.updateStatusLCD()
    self.updateStatusTable()
def updateStatusLCD(self):
    """Render the current device's status onto the simulated front-panel LCD.

    Prefers real panel text from the device query (``panel-line1/2``);
    otherwise shows a description of the most recent status event, or an
    explanatory message when no status is available.
    """
    # Fix: the original else-branch dereferenced self.cur_device even though
    # the first branch explicitly allowed it to be None — guard up front and
    # show a blank LCD instead of raising AttributeError.
    if self.cur_device is None:
        self.drawStatusLCD()
        return
    if self.cur_device.hist and self.cur_device.supported:
        dq = self.cur_device.dq
        if dq.get('panel', 0) == 1:
            # device exposes its own panel text
            line1 = dq.get('panel-line1', '')
            line2 = dq.get('panel-line2', '')
        else:
            # fall back to describing the most recent status event
            try:
                line1 = device.queryString(self.cur_device.hist[0].event_code)
            except (AttributeError, TypeError):
                line1 = ''
            line2 = ''
        self.drawStatusLCD(line1, line2)
    else:
        if self.cur_device.status_type == STATUS_TYPE_NONE:
            self.drawStatusLCD(self.__tr("Status information not"), self.__tr("available for this device."))
        elif not self.cur_device.supported:
            self.drawStatusLCD(self.__tr("Device not supported."))
        elif not self.cur_device.hist:
            self.drawStatusLCD(self.__tr("No status history available."))
        else:
            self.drawStatusLCD()
def drawStatusLCD(self, line1='', line2=''):
    """Paint up to two lines of text onto the LCD background pixmap and show it."""
    canvas = load_pixmap('panel_lcd', 'other')
    painter = QPainter()
    painter.begin(canvas)
    painter.setPen(QColor(0, 0, 0))
    painter.setFont(self.font())
    left_margin, first_baseline, second_baseline = 10, 17, 33
    # TODO: Scroll long lines
    if line1:
        painter.drawText(left_margin, first_baseline, line1)
    if line2:
        painter.drawText(left_margin, second_baseline, line2)
    painter.end()
    self.LCD.setPixmap(canvas)
def updateStatusTable(self):
    """Rebuild the status-history table from the current device's event list."""
    self.StatusTable.clear()
    flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
    row = 0
    hist = self.cur_device.hist[:]
    if hist:
        self.StatusTable.setRowCount(len(hist))
        self.StatusTable.setColumnCount(len(self.status_headers))
        self.StatusTable.setHorizontalHeaderLabels(self.status_headers)
        self.StatusTable.verticalHeader().hide()
        self.StatusTable.horizontalHeader().show()
        # Fill rows bottom-up so the most recent event lands in row 0.
        hist.reverse()
        row = len(hist)-1
        for e in hist:
            if e is None:
                continue
            ess = device.queryString(e.event_code, 0)  # short description
            esl = device.queryString(e.event_code, 1)  # long description
            if row == 0:
                desc = self.__tr("(most recent)")
            else:
                desc = getTimeDeltaDesc(e.timedate)
            dt = QDateTime()
            dt.setTime_t(int(e.timedate)) #, Qt.LocalTime)
            # TODO: In Qt4.x, use QLocale.toString(date, format)
            tt = str("%s %s"%(dt.toString(),desc))
            if e.job_id:
                job_id = to_unicode(e.job_id)
            else:
                job_id = to_unicode('')
            error_state = STATUS_TO_ERROR_STATE_MAP.get(e.event_code, ERROR_STATE_CLEAR)
            tech_type = self.cur_device.tech_type
            # icon set differs between ink and laser devices
            if tech_type in (TECH_TYPE_COLOR_INK, TECH_TYPE_MONO_INK):
                status_pix = getStatusListIcon(error_state)[0] # ink
            else:
                status_pix = getStatusListIcon(error_state)[1] # laser
            event_code = to_unicode(e.event_code)
            i = QTableWidgetItem(QIcon(status_pix), self.__tr(""))
            i.setFlags(flags)
            self.StatusTable.setItem(row, 0, i)
            for col, t in [(1, ess), (2, tt), (3, event_code), (4, job_id), (5, esl)]:
                i = QTableWidgetItem(str(t))
                i.setFlags(flags)
                self.StatusTable.setItem(row, col, i)
            row -= 1
        self.StatusTable.resizeColumnsToContents()
        self.StatusTable.setColumnWidth(0, 24)
    else:
        # No history: show a single informational row instead.
        self.StatusTable.setRowCount(1)
        self.StatusTable.setColumnCount(2)
        self.StatusTable.setHorizontalHeaderLabels(["", ""])
        self.StatusTable.verticalHeader().hide()
        self.StatusTable.horizontalHeader().hide()
        flags = Qt.ItemIsEnabled
        pixmap = getStatusListIcon(ERROR_STATE_ERROR)[0]
        i = QTableWidgetItem(QIcon(pixmap), self.__tr(""))
        i.setFlags(flags)
        self.StatusTable.setItem(row, 0, i)
        i = QTableWidgetItem(self.__tr("Status information not available for this device."))
        i.setFlags(flags)
        self.StatusTable.setItem(0, 1, i)
        self.StatusTable.resizeColumnsToContents()
        self.StatusTable.setColumnWidth(0, 24)
# ***********************************************************************************
#
# SUPPLIES TAB
#
# ***********************************************************************************
def initSuppliesTab(self):
    """One-time setup of the supplies tab: load icons, build the agent-type ->
    swatch-color table, and define the column headers."""
    self.pix_battery = load_pixmap('battery', '16x16')
    yellow = "#ffff00"
    light_yellow = "#ffffcc"
    cyan = "#00ffff"
    light_cyan = "#ccffff"
    magenta = "#ff00ff"
    light_magenta = "#ffccff"
    black = "#000000"
    blue = "#0000ff"
    gray = "#808080"
    # NOTE(review): "#a9a9a9" (CSS 'darkgray') is actually LIGHTER than the
    # "#808080" assigned to gray above — confirm the intended shades.
    dark_gray = "#a9a9a9"
    light_gray = "#c0c0c0"
    red = "#ff0000"
    # Maps an agent (supply) type to the list of swatch colors drawn for it
    # by getStatusIcon (1–3 pie segments).
    self.TYPE_TO_PIX_MAP = {
        AGENT_TYPE_UNSPECIFIED : [black],
        AGENT_TYPE_BLACK: [black],
        AGENT_TYPE_MATTE_BLACK : [black],
        AGENT_TYPE_PHOTO_BLACK : [dark_gray],
        AGENT_TYPE_BLACK_B8800: [black],
        AGENT_TYPE_CMY: [cyan, magenta, yellow],
        AGENT_TYPE_KCM: [light_cyan, light_magenta, light_yellow],
        AGENT_TYPE_GGK: [dark_gray],
        AGENT_TYPE_YELLOW: [yellow],
        AGENT_TYPE_MAGENTA: [magenta],
        AGENT_TYPE_CYAN : [cyan],
        AGENT_TYPE_CYAN_LOW: [light_cyan],
        AGENT_TYPE_YELLOW_LOW: [light_yellow],
        AGENT_TYPE_MAGENTA_LOW: [light_magenta],
        AGENT_TYPE_BLUE: [blue],
        AGENT_TYPE_KCMY_CM: [yellow, cyan, magenta],
        AGENT_TYPE_LC_LM: [light_cyan, light_magenta],
        #AGENT_TYPE_Y_M: [yellow, magenta],
        #AGENT_TYPE_C_K: [black, cyan],
        AGENT_TYPE_LG_PK: [light_gray, dark_gray],
        AGENT_TYPE_LG: [light_gray],
        AGENT_TYPE_G: [gray],
        AGENT_TYPE_DG: [dark_gray],
        AGENT_TYPE_PG: [light_gray],
        AGENT_TYPE_C_M: [cyan, magenta],
        AGENT_TYPE_K_Y: [black, yellow],
        AGENT_TYPE_LC: [light_cyan],
        AGENT_TYPE_RED : [red],
    }
    self.supplies_headers = [self.__tr(""), self.__tr("Description"),
                             self.__tr("HP Part No."), self.__tr("Approx. Level"),
                             self.__tr("Status")]
def updateSuppliesTab(self):
    """Rebuild the supplies table from the current device's agent (supply)
    query data: one row per agent with color icon, description, part number,
    level bar graph, and health text."""
    beginWaitCursor()
    flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
    try:
        self.SuppliesTable.clear()
        self.SuppliesTable.setRowCount(0)
        self.SuppliesTable.setColumnCount(0)
        if self.cur_device is not None and \
           self.cur_device.supported and \
           self.cur_device.status_type != STATUS_TYPE_NONE and \
           self.cur_device.device_state != DEVICE_STATE_NOT_FOUND:
            # Collect agents agent1, agent2, ... until a key is missing.
            self.cur_device.sorted_supplies = []
            a = 1
            while True:
                try:
                    agent_type = int(self.cur_device.dq['agent%d-type' % a])
                    agent_kind = int(self.cur_device.dq['agent%d-kind' % a])
                    agent_sku = self.cur_device.dq['agent%d-sku' % a]
                except KeyError:
                    break
                else:
                    self.cur_device.sorted_supplies.append((a, agent_kind, agent_type, agent_sku))
                a += 1
            self.cur_device.sorted_supplies.sort(key=utils.cmp_to_key(utils.levelsCmp))
            self.SuppliesTable.setRowCount(len(self.cur_device.sorted_supplies))
            self.SuppliesTable.setColumnCount(len(self.supplies_headers))
            self.SuppliesTable.setHorizontalHeaderLabels(self.supplies_headers)
            self.SuppliesTable.verticalHeader().hide()
            self.SuppliesTable.horizontalHeader().show()
            self.SuppliesTable.setIconSize(QSize(100, 18))
            for row, x in enumerate(self.cur_device.sorted_supplies):
                a, agent_kind, agent_type, agent_sku = x
                try:
                    agent_level = int(self.cur_device.dq['agent%d-level' % a])
                    agent_desc = self.cur_device.dq['agent%d-desc' % a]
                    agent_health_desc = self.cur_device.dq['agent%d-health-desc' % a]
                except KeyError:
                    break
                # Bar graph level (only for kinds that report a fill level)
                level_pixmap = None
                if agent_kind in (AGENT_KIND_SUPPLY,
                                  #AGENT_KIND_HEAD,
                                  AGENT_KIND_HEAD_AND_SUPPLY,
                                  AGENT_KIND_TONER_CARTRIDGE,
                                  AGENT_KIND_MAINT_KIT,
                                  AGENT_KIND_ADF_KIT,
                                  AGENT_KIND_INT_BATTERY,
                                  AGENT_KIND_DRUM_KIT,
                                  ):
                    level_pixmap = self.createStatusLevelGraphic(agent_level, agent_type)
                # Color icon (only for kinds with a meaningful color swatch)
                pixmap = None
                if agent_kind in (AGENT_KIND_SUPPLY,
                                  AGENT_KIND_HEAD,
                                  AGENT_KIND_HEAD_AND_SUPPLY,
                                  AGENT_KIND_TONER_CARTRIDGE,
                                  #AGENT_KIND_MAINT_KIT,
                                  #AGENT_KIND_ADF_KIT,
                                  AGENT_KIND_INT_BATTERY,
                                  #AGENT_KIND_DRUM_KIT,
                                  ):
                    pixmap = self.getStatusIcon(agent_kind, agent_type)
                if pixmap is not None:
                    i = QTableWidgetItem(QIcon(pixmap), self.__tr(""))
                    i.setFlags(flags)
                    self.SuppliesTable.setItem(row, 0, i)
                for col, t in [(1, agent_desc), (2, agent_sku), (4, agent_health_desc)]:
                    i = QTableWidgetItem(str(t))
                    i.setFlags(flags)
                    self.SuppliesTable.setItem(row, col, i)
                if level_pixmap is not None:
                    i = QTableWidgetItem(QIcon(level_pixmap), self.__tr(""))
                    i.setFlags(flags)
                    self.SuppliesTable.setItem(row, 3, i)
            self.SuppliesTable.resizeColumnsToContents()
            self.SuppliesTable.setColumnWidth(0, 24)
            self.SuppliesTable.setColumnWidth(3, 120)
        else: # No supplies info
            log.warning("Supplies information not available for this device.")
            flags = Qt.ItemIsEnabled
            self.SuppliesTable.setRowCount(1)
            self.SuppliesTable.setColumnCount(2)
            self.SuppliesTable.setHorizontalHeaderLabels(["", ""])
            self.SuppliesTable.verticalHeader().hide()
            self.SuppliesTable.horizontalHeader().hide()
            i = QTableWidgetItem(self.__tr("Supplies information not available for this device."))
            i.setFlags(flags)
            self.SuppliesTable.setItem(0, 1, i)
            pixmap = getStatusListIcon(ERROR_STATE_ERROR)[0]
            i = QTableWidgetItem(QIcon(pixmap), self.__tr(""))
            i.setFlags(flags)
            self.SuppliesTable.setItem(0, 0, i)
            self.SuppliesTable.resizeColumnsToContents()
            self.SuppliesTable.setColumnWidth(0, 24)
    finally:
        endWaitCursor()
def getStatusIcon(self, agent_kind, agent_type):
if agent_kind in (AGENT_KIND_SUPPLY,
AGENT_KIND_HEAD,
AGENT_KIND_HEAD_AND_SUPPLY,
AGENT_KIND_TONER_CARTRIDGE):
map = self.TYPE_TO_PIX_MAP[agent_type]
if isinstance(map, list):
map_len = len(map)
pix = QPixmap(16, 16)
pix.fill(QColor(0, 0, 0, 0))
p = QPainter()
p.begin(pix)
p.setRenderHint(QPainter.Antialiasing)
if map_len == 1:
p.setPen(QColor(map[0]))
p.setBrush(QBrush(QColor(map[0]), Qt.SolidPattern))
p.drawPie(2, 2, 10, 10, 0, 5760)
elif map_len == 2:
p.setPen(QColor(map[0]))
p.setBrush(QBrush(QColor(map[0]), Qt.SolidPattern))
p.drawPie(2, 4, 8, 8, 0, 5760)
p.setPen(QColor(map[1]))
p.setBrush(QBrush(QColor(map[1]), Qt.SolidPattern))
p.drawPie(6, 4, 8, 8, 0, 5760)
elif map_len == 3:
p.setPen(QColor(map[2]))
p.setBrush(QBrush(QColor(map[2]), Qt.SolidPattern))
p.drawPie(6, | |
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import csv
import cv2
import itertools
import json
import numpy as np
import pickle
import time
__all__ = ['House']
######################################
# Util Functions
######################################
# allowed target room types
# NOTE: consider "toilet" and "bathroom" the same thing
ALLOWED_TARGET_ROOM_TYPES = ['kitchen', 'dining_room', 'living_room', 'bathroom', 'bedroom'] # 'office'
# allowed room types for auxiliary prediction task
# (class id per room type for the prediction head; 'indoor' is the catch-all
# used by _get_pred_room_tp_id for unknown room names)
ALLOWED_PREDICTION_ROOM_TYPES = dict(
    outdoor=0, indoor=1, kitchen=2, dining_room=3, living_room=4, bathroom=5, bedroom=6, office=7, storage=8)
def _equal_room_tp(room, target):
"""
NOTE: Ensure <target> is always from <ALLOWED_TARGET_ROOM_TYPES>!!!!
DO NOT swap the order of arguments
"""
room = room.lower()
target = target.lower()
return (room == target) or \
((target == 'bathroom') and (room == 'toilet')) or \
((target == 'bedroom') and (room == 'guest_room'))
def _get_pred_room_tp_id(room):
    """Map a room-type name to its prediction-class id.

    'toilet'/'guest_room' are folded into 'bathroom'/'bedroom'; any name not
    in ALLOWED_PREDICTION_ROOM_TYPES falls back to the 'indoor' class.
    """
    canonical = {'toilet': 'bathroom', 'guest_room': 'bedroom'}
    key = room.lower()
    key = canonical.get(key, key)
    return ALLOWED_PREDICTION_ROOM_TYPES.get(key, ALLOWED_PREDICTION_ROOM_TYPES['indoor'])
def parse_walls(objFile, lower_bound=1.0):
    """Extract bounding boxes of wall groups from a Wavefront .obj file.

    Vertices of each 'g' group whose name contains 'Wall' are collected and
    reduced to an axis-aligned bounding box; only boxes whose minimum
    y-coordinate is below <lower_bound> are kept (walls reaching down into
    the robot's height range).

    Args:
        objFile (str): path to the house .obj file.
        lower_bound (float): keep a wall only if its bbox starts below this height.

    Returns:
        list[dict]: entries of the form {'bbox': {'min': [x,y,z], 'max': [x,y,z]}}.

    Raises:
        AssertionError: if a vertex line does not contain exactly 3 coordinates.
    """
    def create_box(vers):
        # Reduce a vertex list to a bbox dict; None when empty or when the
        # box starts at/above lower_bound.
        if not vers:
            return None
        v_min = [min(v[i] for v in vers) for i in range(3)]
        v_max = [max(v[i] for v in vers) for i in range(3)]
        if v_min[1] < lower_bound:
            return {'bbox': {'min': v_min, 'max': v_max}}
        return None
    walls = []
    vers = []  # vertex accumulator; None while inside a non-wall group
    with open(objFile, 'r') as file:
        for line in file:  # iterate lazily instead of readlines()
            if len(line) < 2:
                continue
            if line[0] == 'g':
                # close out the previous group before starting a new one
                if vers:
                    walls.append(create_box(vers))
                vers = [] if 'Wall' in line else None
            if (vers is not None) and (line[0] == 'v') and (line[1] == ' '):
                coor = [float(v) for v in line[2:].split(' ') if len(v) > 0]
                if len(coor) != 3:
                    # same failure mode as the original print+assert(False)
                    raise AssertionError('malformed vertex line: {!r} -> {}'.format(line, coor))
                vers.append(coor)
    if vers:
        walls.append(create_box(vers))
    return [w for w in walls if w is not None]
def fill_region(proj, x1, y1, x2, y2, c):
    """Set the inclusive rectangle [x1..x2] x [y1..y2] of 2-D array <proj> to value <c>."""
    rows = slice(x1, x2 + 1)
    cols = slice(y1, y2 + 1)
    proj[rows, cols] = c
def fill_obj_mask(house, dest, obj, c=1):
    """Rasterize an object's bbox footprint (x/z plane) into the 2-D mask <dest> with value <c>."""
    n_row = dest.shape[0]
    bb_min = obj['bbox']['min']
    bb_max = obj['bbox']['max']
    # project to the ground plane: world x/z -> grid coordinates
    x1, y1, x2, y2 = house.rescale(bb_min[0], bb_min[2], bb_max[0], bb_max[2], n_row)
    fill_region(dest, x1, y1, x2, y2, c)
class House(object):
"""core class for loading and processing a house from SUNCG dataset
"""
def __init__(self, JsonFile, ObjFile, MetaDataFile,
             CachedFile=None,
             StorageFile=None,
             GenRoomTypeMap=False,
             EagleViewRes=100,
             DebugInfoOn=False,
             ColideRes=1000,
             RobotRadius=0.1,
             RobotHeight=1.0,
             CarpetHeight=0.15,
             SetTarget=False,
             ApproximateMovableMap=False,
             _IgnoreSmallHouse=False,  # should be only set true when called by "cache_houses.py"
             DebugMessages=False
             ):
    """Initialization and Robot Parameters
    Note:
        Generally only the first 4 arguments are required to set up a house
        Ensure you run the script to generate cached data for all the houses
    Args:
        JsonFile (str): file name of the house json file (house.json)
        ObjFile (str): file name of the house object file (house.obj)
        MetaDataFile (str): file name of the meta data (ModelCategoryMapping.csv)
        CachedFile (str, recommended): file name of the pickled cached data for this house, None if no such cache (cachedmap1k.pkl)
        StorageFile (str, optional): if CachedFile is None, pickle all the data and store in this file
        GenRoomTypeMap (bool, optional): if turned on, generate the room type map for each location
        EagleViewRes (int, optional): resolution of the topdown 2d map
        DebugInfoOn (bool, optional): store additional debugging information when this option is on
        ColideRes (int, optional): resolution of the 2d map for collision check (generally should not changed)
        RobotRadius (double, optional): radius of the robot/agent (generally should not be changed)
        RobotHeight (double, optional): height of the robot/agent (generally should not be changed)
        CarpetHeight (double, optional): maximum height of the obstacles that agent can directly go through (gennerally should not be changed)
        SetTarget (bool, optional): whether or not to choose a default target room and pre-compute the valid locations
        ApproximateMovableMap (bool, optional): Fast initialization of valid locations which are not as accurate or fine-grained. Requires OpenCV if true
        DebugMessages=False (bool, optional): whether or not to show debug messages
    """
    if DebugMessages == True:
        ts = time.time()
        print('Data Loading ...')
    self.metaDataFile = MetaDataFile
    self.objFile = ObjFile
    self.robotHei = RobotHeight
    self.carpetHei = CarpetHeight
    self.robotRad = RobotRadius
    self._debugMap = None if not DebugInfoOn else True
    with open(JsonFile) as jfile:
        self.house = house = json.load(jfile)
    # walls are parsed from the .obj; RobotHeight doubles as the cutoff height
    self.all_walls = parse_walls(ObjFile, RobotHeight)
    # validity check
    if abs(house['scaleToMeters'] - 1.0) > 1e-8:
        print('[Error] Currently <scaleToMeters> must be 1.0!')
        assert(False)
    if len(house['levels']) > 1 and DebugMessages == True:
        print('[Warning] Currently only support ground floor! <total floors = %d>' % (len(house['levels'])))
    self.level = level = house['levels'][0]  # only support ground floor now
    # world-coordinate extent of the level; L_lo/L_hi span both x and z axes
    self.L_min_coor = _L_lo = np.array(level['bbox']['min']).astype('float32')
    self.L_lo = min(_L_lo[0], _L_lo[2])
    self.L_max_coor = _L_hi = np.array(level['bbox']['max']).astype('float32')
    self.L_hi = max(_L_hi[0], _L_hi[2])
    self.L_det = self.L_hi - self.L_lo
    self.n_row = ColideRes
    self.eagle_n_row = EagleViewRes
    self.grid_det = self.L_det / np.float32(self.n_row)  # world size of one grid cell
    self.all_obj = [node for node in level['nodes'] if node['type'].lower() == 'object']
    self.all_rooms = [node for node in level['nodes'] if (node['type'].lower() == 'room') and ('roomTypes' in node)]
    self.all_roomTypes = [room['roomTypes'] for room in self.all_rooms]
    self.all_desired_roomTypes = []
    self.default_roomTp = None
    # the first allowed room type present in the house becomes the default target
    for roomTp in ALLOWED_TARGET_ROOM_TYPES:
        if any([any([_equal_room_tp(tp, roomTp) for tp in tps]) for tps in self.all_roomTypes]):
            self.all_desired_roomTypes.append(roomTp)
            if self.default_roomTp is None: self.default_roomTp = roomTp
    assert self.default_roomTp is not None, 'Cannot Find Any Desired Rooms!'
    if DebugMessages == True:
        print('>> Default Target Room Type Selected = {}'.format(self.default_roomTp))
        print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
    # early exit for the cache-building script: skip tiny/kitchen-less houses
    if _IgnoreSmallHouse and ((len(self.all_desired_roomTypes) < 2) or ('kitchen' not in self.all_desired_roomTypes)):
        self.all_desired_roomTypes=[]
        return
    if DebugMessages == True:
        print('Generating Low Resolution Obstacle Map ...')
        ts = time.time()
    # generate a low-resolution obstacle map
    self.tinyObsMap = np.ones((self.eagle_n_row, self.eagle_n_row), dtype=np.uint8)
    self.genObstacleMap(MetaDataFile, gen_debug_map=False, dest=self.tinyObsMap, n_row=self.eagle_n_row-1)
    self.eagleMap = np.zeros((4, self.eagle_n_row, self.eagle_n_row), dtype=np.uint8)
    self.eagleMap[0, ...] = self.tinyObsMap
    if DebugMessages == True:
        print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
    # load from cache
    if CachedFile is not None:
        assert not DebugInfoOn, 'Please set DebugInfoOn=True when loading data from cached file!'
        if DebugMessages == True:
            print('Loading Obstacle Map and Movability Map From Cache File ...')
            ts = time.time()
        with open(CachedFile, 'rb') as f:
            self.obsMap, self.moveMap = pickle.load(f)
        if DebugMessages == True:
            print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
    else:
        # generate obstacle map
        if DebugMessages == True:
            print('Generate High Resolution Obstacle Map (For Collision Check) ...')
            ts = time.time()
        # obsMap was indexed by (x, y), not (y, x)
        self.obsMap = np.ones((self.n_row+1, self.n_row+1), dtype=np.uint8)  # a small int is enough
        if self._debugMap is not None:
            self._debugMap = np.ones((self.n_row+1, self.n_row+1), dtype=np.float)
        self.genObstacleMap(MetaDataFile)
        if DebugMessages == True:
            print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
        # generate movability map for robots considering the radius
        if DebugMessages == True:
            print('Generate Movability Map ...')
            ts = time.time()
        self.moveMap = np.zeros((self.n_row+1, self.n_row+1), dtype=np.int8)  # initially not movable
        self.genMovableMap(ApproximateMovableMap)
        if DebugMessages == True:
            print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
        # persist the freshly generated maps for the next run
        if StorageFile is not None:
            if DebugMessages == True:
                print('Storing Obstacle Map and Movability Map to Cache File ...')
                ts = time.time()
            with open(StorageFile, 'wb') as f:
                pickle.dump([self.obsMap, self.moveMap], f)
            if DebugMessages == True:
                print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
    # set target room connectivity
    if DebugMessages == True:
        ts = time.time()
    self.connMapDict = {}
    self.roomTypeLocMap = {}  # roomType -> feasible locations
    self.targetRoomTp = None
    self.targetRooms = []
    self.connMap = None
    self.inroomDist = None
    if SetTarget:
        if DebugMessages == True:
            print('Generate Target connectivity Map (Default <{}>) ...'.format(self.default_roomTp))
        self.setTargetRoom(self.default_roomTp, _setEagleMap=True)
        if DebugMessages == True:
            print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
    self.roomTypeMap = None
    if GenRoomTypeMap:
        if DebugMessages == True:
            ts = time.time()
            print('Generate Room Type Map ...')
        self.roomTypeMap = np.zeros((self.n_row+1, self.n_row+1), dtype=np.uint16)
        self._generate_room_type_map()
        if DebugMessages == True:
            print(' --> Done! Elapsed = %.2fs' % (time.time() - ts))
def _generate_room_type_map(self):
rtMap = self.roomTypeMap
# fill all the mask of rooms
for room in self.all_rooms:
msk = 1 << _get_pred_room_tp_id('indoor')
for tp in room['roomTypes']: msk = msk | (1 << _get_pred_room_tp_id(tp))
_x1, _, _y1 = room['bbox']['min']
_x2, _, _y2 = room['bbox']['max']
x1, y1, x2, y2 = self.rescale(_x1, _y1, _x2, _y2)
for x | |
Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_d1845268faf55f98bc952872259f16f_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/global-credential/http-read')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_d1845268faf55f98bc952872259f16f_v2_2_1', json_data)
def create_http_read_credentials(self,
                                 headers=None,
                                 payload=None,
                                 active_validation=True,
                                 **request_parameters):
    """Adds HTTP read credentials.

    Args:
        headers(dict): Dictionary of HTTP Headers to send with the Request.
        payload(list): A JSON serializable Python object to send in the
            body of the Request.
        active_validation(bool): Enable/Disable payload validation.
            Defaults to True.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        MyDict: JSON response. Access the object's properties by using
        the dot notation or the bracket notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    check_type(payload, list)
    if headers is not None:
        # both headers, when present, must be non-None strings
        for header_name in ('Content-Type', 'X-Auth-Token'):
            if header_name in headers:
                check_type(headers.get(header_name),
                           basestring, may_be_none=False)
    _params = dict_from_items_with_values(dict(request_parameters))
    path_params = {}
    _payload = payload or []
    if active_validation:
        validator = self._request_validator('jsd_ffcaccdd9f2530abf66adc98c3f0201_v2_2_1')
        validator.validate(_payload)
    _headers = self._session.headers or {}
    with_custom_headers = bool(headers)
    if headers:
        _headers.update(dict_of_str(headers))
    endpoint_full_url = apply_path_params('/dna/intent/api/v1/global-credential/http-read',
                                          path_params)
    if with_custom_headers:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload, headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)
    return self._object_factory('bpm_ffcaccdd9f2530abf66adc98c3f0201_v2_2_1', json_data)
# Fix: the defaults for enablePassword and password were the literal
# placeholder text "<PASSWORD>" (invalid Python, left over from credential
# redaction); None restores valid syntax and matches create_cli_credentials.
def update_cli_credentials(self,
                           comments=None,
                           credentialType=None,
                           description=None,
                           enablePassword=None,
                           id=None,
                           instanceTenantId=None,
                           instanceUuid=None,
                           password=None,
                           username=None,
                           headers=None,
                           payload=None,
                           active_validation=True,
                           **request_parameters):
    """Updates global CLI credentials.

    Args:
        comments(string): Discovery's comments.
        credentialType(string): Discovery's credentialType. Available values are 'GLOBAL' and 'APP'.
        description(string): Discovery's description.
        enablePassword(string): Discovery's enablePassword.
        id(string): Discovery's id.
        instanceTenantId(string): Discovery's instanceTenantId.
        instanceUuid(string): Discovery's instanceUuid.
        password(string): Discovery's password.
        username(string): Discovery's username.
        headers(dict): Dictionary of HTTP Headers to send with the Request.
        payload(dict): A JSON serializable Python object to send in the
            body of the Request.
        active_validation(bool): Enable/Disable payload validation.
            Defaults to True.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        MyDict: JSON response. Access the object's properties by using
        the dot notation or the bracket notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    check_type(payload, dict)
    if headers is not None:
        if 'Content-Type' in headers:
            check_type(headers.get('Content-Type'),
                       basestring, may_be_none=False)
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)
    _params = {
    }
    _params.update(request_parameters)
    _params = dict_from_items_with_values(_params)
    path_params = {
    }
    # None-valued fields are dropped by dict_from_items_with_values below.
    _payload = {
        'comments': comments,
        'credentialType': credentialType,
        'description': description,
        'enablePassword': enablePassword,
        'id': id,
        'instanceTenantId': instanceTenantId,
        'instanceUuid': instanceUuid,
        'password': password,
        'username': username,
    }
    _payload.update(payload or {})
    _payload = dict_from_items_with_values(_payload)
    if active_validation:
        self._request_validator('jsd_d39d23589e85db0a63c414057c_v2_2_1')\
            .validate(_payload)
    with_custom_headers = False
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        with_custom_headers = True
    e_url = ('/dna/intent/api/v1/global-credential/cli')
    endpoint_full_url = apply_path_params(e_url, path_params)
    if with_custom_headers:
        json_data = self._session.put(endpoint_full_url, params=_params,
                                      json=_payload,
                                      headers=_headers)
    else:
        json_data = self._session.put(endpoint_full_url, params=_params,
                                      json=_payload)
    return self._object_factory('bpm_d39d23589e85db0a63c414057c_v2_2_1', json_data)
def create_cli_credentials(self,
                           comments=None,
                           credentialType=None,
                           description=None,
                           enablePassword=None,
                           id=None,
                           instanceTenantId=None,
                           instanceUuid=None,
                           password=None,
                           username=None,
                           headers=None,
                           payload=None,
                           active_validation=True,
                           **request_parameters):
    """Adds global CLI credential.

    Args:
        comments(string): Discovery's comments.
        credentialType(string): Discovery's credentialType. Available values are 'GLOBAL' and 'APP'.
        description(string): Discovery's description.
        enablePassword(string): Discovery's enablePassword.
        id(string): Discovery's id.
        instanceTenantId(string): Discovery's instanceTenantId.
        instanceUuid(string): Discovery's instanceUuid.
        password(string): Discovery's password.
        username(string): Discovery's username.
        headers(dict): Dictionary of HTTP Headers to send with the Request
            .
        payload(dict): A JSON serializable Python object to send in the
            body of the Request.
        active_validation(bool): Enable/Disable payload validation.
            Defaults to True.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        MyDict: JSON response. Access the object's properties by using
        the dot notation or the bracket notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    # NOTE: `password` previously defaulted to a corrupted placeholder
    # (`<PASSWORD>`), which is a syntax error; it now defaults to None
    # like every other optional payload field.
    check_type(headers, dict)
    check_type(payload, dict)
    if headers is not None:
        if 'Content-Type' in headers:
            check_type(headers.get('Content-Type'),
                       basestring, may_be_none=False)
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)

    # This endpoint takes no query or path parameters of its own.
    _params = {
    }
    _params.update(request_parameters)
    _params = dict_from_items_with_values(_params)

    path_params = {
    }

    # Build the request body; None-valued entries are dropped below.
    _payload = {
        'comments':
            comments,
        'credentialType':
            credentialType,
        'description':
            description,
        'enablePassword':
            enablePassword,
        'id':
            id,
        'instanceTenantId':
            instanceTenantId,
        'instanceUuid':
            instanceUuid,
        'password':
            password,
        'username':
            username,
    }
    _payload.update(payload or {})
    _payload = dict_from_items_with_values(_payload)
    if active_validation:
        self._request_validator('jsd_c524f0ec199e5435bcaee56b423532e7_v2_2_1')\
            .validate(_payload)

    # Merge per-call headers over the session defaults when provided.
    with_custom_headers = False
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        with_custom_headers = True

    e_url = ('/dna/intent/api/v1/global-credential/cli')
    endpoint_full_url = apply_path_params(e_url, path_params)
    if with_custom_headers:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload,
                                       headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)

    return self._object_factory('bpm_c524f0ec199e5435bcaee56b423532e7_v2_2_1', json_data)
def get_list_of_discoveries_by_discovery_id(self,
                                            id,
                                            ip_address=None,
                                            limit=None,
                                            offset=None,
                                            headers=None,
                                            **request_parameters):
    """Returns the list of discovery jobs for the given Discovery ID. The results can be optionally filtered based on
    IP. Discovery ID can be obtained using the "Get Discoveries by range" API.

    Args:
        id(basestring): id path parameter. Discovery ID.
        offset(int): offset query parameter.
        limit(int): limit query parameter.
        ip_address(basestring): ipAddress query parameter.
        headers(dict): Dictionary of HTTP Headers to send with the Request
            .
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        MyDict: JSON response. Access the object's properties by using
        the dot notation or the bracket notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    # Validate argument types up front, before any network activity.
    check_type(headers, dict)
    check_type(offset, int)
    check_type(limit, int)
    check_type(ip_address, basestring)
    check_type(id, basestring,
               may_be_none=False)
    if headers is not None:
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)

    # Assemble the query string; None-valued entries are dropped.
    query = {
        'offset': offset,
        'limit': limit,
        'ipAddress': ip_address,
    }
    query.update(request_parameters)
    _params = dict_from_items_with_values(query)

    # Substitute the discovery id into the endpoint template.
    endpoint_full_url = apply_path_params(
        '/dna/intent/api/v1/discovery/{id}/job', {'id': id})

    # Per-call headers (when given) are layered over the session defaults.
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        json_data = self._session.get(endpoint_full_url, params=_params,
                                      headers=_headers)
    else:
        json_data = self._session.get(endpoint_full_url, params=_params)

    return self._object_factory('bpm_e369e19c1a835567855984d9f2c628ef_v2_2_1', json_data)
def get_discovery_jobs_by_ip(self,
                             ip_address,
                             limit=None,
                             name=None,
                             offset=None,
                             headers=None,
                             **request_parameters):
    """Returns the list of discovery jobs for the given IP.

    Args:
        offset(int): offset query parameter.
        limit(int): limit query parameter.
        ip_address(basestring): ipAddress query parameter.
        name(basestring): name query parameter.
        headers(dict): Dictionary of HTTP Headers to send with the Request
            .
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        MyDict: JSON response. Access the object's properties by using
        the dot notation or the bracket notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    # Validate argument types up front; ip_address is the only required one.
    check_type(headers, dict)
    check_type(offset, int)
    check_type(limit, int)
    check_type(ip_address, basestring,
               may_be_none=False)
    check_type(name, basestring)
    if headers is not None:
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)

    # Assemble the query string; None-valued entries are dropped.
    query = {
        'offset': offset,
        'limit': limit,
        'ipAddress': ip_address,
        'name': name,
    }
    query.update(request_parameters)
    _params = dict_from_items_with_values(query)

    # No path parameters for this endpoint.
    endpoint_full_url = apply_path_params(
        '/dna/intent/api/v1/discovery/job', {})

    # Per-call headers (when given) are layered over the session defaults.
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        json_data = self._session.get(endpoint_full_url, params=_params,
                                      headers=_headers)
    else:
        json_data = self._session.get(endpoint_full_url, params=_params)

    return self._object_factory('bpm_bde1ca5763fc552ab78cd3b2ecf119b1_v2_2_1', json_data)
def get_devices_discovered_by_id(self,
id,
task_id=None,
headers=None,
**request_parameters):
"""Returns the count of network devices discovered in the given discovery. Discovery ID can be obtained using the
"Get Discoveries by range" API.
Args:
id(basestring): id path parameter. Discovery ID.
task_id(basestring): taskId query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's | |
'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIBarracksTrainInfestedCivilianLevel2': {
'build_time': 120,
'built_from': ['SICivilianStructure'],
'display_name': 'Infestation Level 1',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIBarracksTrainInfestedCivilianLevel3': {
'build_time': 120,
'built_from': ['SICivilianStructure'],
'display_name': 'Infestation Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIBarracksTrainInfestedCivilianLevel4': {
'build_time': 120,
'built_from': ['SICivilianStructure'],
'display_name': 'Infestation Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'StukovInfestedInfestedCivilianLeapAttack': {
'build_time': 60,
'built_from': ['SICivilianStructure'],
'display_name': 'Anaerobic Enhancement',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'StukovInfestedCivilianSpawnBroodlingOnDeath': {
'build_time': 90,
'built_from': ['SICivilianStructure'],
'display_name': 'Broodling Gestation',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIMarinePlaguedMunitions': {
'build_time': 90,
'built_from': ['SIBarracksTechLab'],
'display_name': 'Plagued Munitions',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIMarineTrooperRange': {
'build_time': 60,
'built_from': ['SIBarracksTechLab'],
'display_name': 'Retinal Augmentation',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIBunkerLifeRegen': {
'build_time': 60,
'built_from': ['SIEngineeringBay'],
'display_name': 'Regenerative Plating',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIBunkerArmor': {
'build_time': 60,
'built_from': ['SIEngineeringBay'],
'display_name': 'Calcified Armor',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIInfantryWeaponsLevel1': {
'build_time': 160,
'built_from': ['SIEngineeringBay'],
'display_name': 'Stukov Infantry Weapons Level 1',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIInfantryWeaponsLevel2': {
'build_time': 190,
'built_from': ['SIEngineeringBay'],
'display_name': 'Stukov Infantry Weapons Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIInfantryWeaponsLevel3': {
'build_time': 220,
'built_from': ['SIEngineeringBay'],
'display_name': 'Stukov Infantry Weapons Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIInfantryArmorLevel1': {
'build_time': 160,
'built_from': ['SIEngineeringBay'],
'display_name': 'Stukov Infantry Armor Level 1',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIInfantryArmorLevel2': {
'build_time': 190,
'built_from': ['SIEngineeringBay'],
'display_name': 'Stukov Infantry Armor Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIInfantryArmorLevel3': {
'build_time': 220,
'built_from': ['SIEngineeringBay'],
'display_name': 'Stukov Infantry Armor Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIDiamondbackImprovedEnsnare': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Saturated Cultures',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'InfestedSiegeTankAmmo': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Automated Mitosis',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIDiamondbackSnailTrail': {
'build_time': 90,
'built_from': ['SITechLab'],
'display_name': 'Caustic Mucus',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'InfestedSiegeTankArmoredDamage': {
'build_time': 90,
'built_from': ['SITechLab'],
'display_name': 'Acidic Enzymes',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SILiberatorImprovedAoeAttack': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Viral Contamination',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'LiberatorInfestedBecomeSwarm': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Cloud Dispersal',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'StukovInfestedBansheeBurrowRegeneration': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Rapid Hibernation',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'StukovInfestedBansheeInfestedLife': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Braced Exoskeleton',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIQueenFungalGrowth': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Fungal Growth',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SIQueenEnergy': {
'build_time': 60,
'built_from': ['SITechLab'],
'display_name': 'Enhanced Mitochondria',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SITerranVehicleWeaponsLevel1': {
'build_time': 160,
'built_from': ['SIArmory'],
'display_name': 'Stukov Vehicle and Ship Weapons Level 1',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SITerranVehicleWeaponsLevel2': {
'build_time': 190,
'built_from': ['SIArmory'],
'display_name': 'Stukov Vehicle and Ship Weapons Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SITerranVehicleWeaponsLevel3': {
'build_time': 220,
'built_from': ['SIArmory'],
'display_name': 'Stukov Vehicle and Ship Weapons Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SITerranVehicleArmorsLevel1': {
'build_time': 160,
'built_from': ['SIArmory'],
'display_name': 'Stukov Vehicle and Ship Armor Level 1',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SITerranVehicleArmorsLevel2': {
'build_time': 190,
'built_from': ['SIArmory'],
'display_name': 'Stukov Vehicle and Ship Armor Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'SITerranVehicleArmorsLevel3': {
'build_time': 220,
'built_from': ['SIArmory'],
'display_name': 'Stukov Vehicle and Ship Armor Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
},
'Fenix': {
# Units
'ZealotPurifier': {
'build_time': 30,
'built_from': ['Gateway', 'WarpGate'],
'display_name': 'Legionnaire',
'race': 'Protoss',
'type': 'Unit',
'is_morph': False,
},
'SentryFenix': {
'build_time': 37,
'built_from': ['Gateway', 'WarpGate'],
'display_name': 'Conservator',
'race': 'Protoss',
'type': 'Unit',
'is_morph': False,
},
'AdeptFenix': {
'build_time': 38,
'built_from': ['Gateway', 'WarpGate'],
'display_name': 'Adept',
'race': 'Protoss',
'type': 'Unit',
'is_morph': False,
},
'ColossusPurifier': {
'build_time': 75,
'built_from': ['RoboticsFacility'],
'display_name': 'Colossus',
'race': 'Protoss',
'type': 'Unit',
'is_morph': False,
},
'Scout': {
'build_time': 30,
'built_from': ['Stargate'],
'display_name': 'Scout',
'race': 'Protoss',
'type': 'Unit',
'is_morph': False,
},
# Buildings
# Upgrades
'FenixSuitAttackDamage': {
'build_time': 90,
'built_from': ['Forge'],
'display_name': 'Purifier Armaments',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'AStrongHeart': {
'build_time': 10,
'built_from': ['Forge'],
'display_name': 'A Strong Heart',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixArbiterDetection': {
'build_time': 60,
'built_from': ['Forge'],
'display_name': 'Observation Protocol',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixSentryGuardianZoneUpgrade': {
'build_time': 60,
'built_from': ['CyberneticsCore'],
'display_name': 'Optimized Emitters',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'AdeptFenixShadeSpawn': {
'build_time': 60,
'built_from': ['TwilightCouncil'],
'display_name': 'Psionic Projection',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixKaldalisCleave': {
'build_time': 60,
'built_from': ['TwilightCouncil'],
'display_name': 'Empowered Blades',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionTalisAdeptBounceShotUpgrade': {
'build_time': 60,
'built_from': ['TwilightCouncil'],
'display_name': 'Debilitation System',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'DisruptorCloak': {
'build_time': 60,
'built_from': ['RoboticsBay'],
'display_name': 'Cloaking Module',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'DisruptorSecondExplosion': {
'build_time': 90,
'built_from': ['RoboticsBay'],
'display_name': 'Purification Echo',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixImmortalDetonationShot': {
'build_time': 90,
'built_from': ['RoboticsBay'],
'display_name': 'Gravimetric Overload',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixWarbringerColossusPowerShot': {
'build_time': 90,
'built_from': ['RoboticsBay'],
'display_name': 'Purification Blast',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixScoutWeaponRange': {
'build_time': 60,
'built_from': ['FleetBeacon'],
'display_name': 'Combat Sensor Array',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionScoutAOEMissiles': {
'build_time': 60,
'built_from': ['FleetBeacon'],
'display_name': 'Suppression Procedure',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionCarrierBombers': {
'build_time': 90,
'built_from': ['FleetBeacon'],
'display_name': 'Interdictors',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionTaldarinImmortal': {
'build_time': 60,
'built_from': ['PurifierConclave'], # TODO verify
'display_name': 'Taldarin\'s A.I. Personality',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionWarbringerColossus': {
'build_time': 60,
'built_from': ['PurifierConclave'], # TODO verify
'display_name': 'Warbringer\'s A.I. Personality',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionKaldalisZealot': {
'build_time': 40,
'built_from': ['PurifierConclave'], # TODO verify
'display_name': 'Kaldalis\' A.I. Personality',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionMojoScout': {
'build_time': 60,
'built_from': ['PurifierConclave'], # TODO verify
'display_name': 'Mojo\'s A.I. Personality',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionTalisAdept': {
'build_time': 60,
'built_from': ['PurifierConclave'], # TODO verify
'display_name': 'Talis\' A.I. Personality',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'FenixChampionClolarionCarrier': {
'build_time': 60,
'built_from': ['PurifierConclave'], # TODO verify
'display_name': 'Clolarion\'s A.I. Personality',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
},
'Dehaka': {
# Units
'DehakaTrainEggDrone': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'DehakaTrainEggZergling': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'DehakaRavasaur': { # Zergling morph
'build_time': 8,
'built_from': [],
'display_name': 'Ravasaur',
'race': 'Zerg',
'type': 'Unit',
'is_morph': True,
},
'DehakaTrainEggRoach': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'DehakaRoachLevel3': {
'build_time': 8,
'built_from': [],
'display_name': '<NAME>',
'race': 'Zerg',
'type': 'Unit',
'is_morph': True,
},
'DehakaTrainEggHydralisk': {
'build_time': 0,
'built_from': [],
'display_name': 'Primal Hydralisk',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'DehakaMutaliskLevel3FightMorph': { # morph from Hydralisk
'build_time': 8,
'built_from': [],
'display_name': 'Primal Mutalisk',
'race': 'Zerg',
'type': 'Unit',
'is_morph': True,
},
'DehakaTrainEggSwarmHost': {
'build_time': 0,
'built_from': [],
'display_name': 'Primal Swarm Host',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'DehakaTrainEggUltralisk': {
'build_time': 0,
'built_from': [],
'display_name': 'Primal Ultralisk',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'ImpalerDehaka': {
'build_time': 0,
'built_from': [''],
'display_name': 'Impaler',
'race': 'Zerg',
'type': 'Unit',
'is_morph': True,
},
'DehakaGuardianFightMorph': {
'build_time': 0,
'built_from': [''],
'display_name': 'Primal Guardian',
'race': 'Zerg',
'type': 'Unit',
'is_morph': True,
},
# Buildings
'DehakaBarracks': {
'build_time': 0,
'built_from': [],
'display_name': 'Primal Warden',
'race': 'Zerg',
'type': 'Building',
'is_morph': False,
},
'DehakaHatchery': {
'build_time': 0,
'built_from': [],
'display_name': 'Primal Hive',
'race': 'Zerg',
'type': 'Building',
'is_morph': False,
},
'DehakaGlevigStructure': {
'build_time': 0,
'built_from': [],
'display_name': 'Glevig\'s Den',
'race': 'Zerg',
'type': 'Building',
'is_morph': False,
},
'DehakaMurvarStructure': {
'build_time': 0,
'built_from': [],
'display_name': 'Murvar\'s Den',
'race': 'Zerg',
'type': 'Building',
'is_morph': False,
},
'DehakaDakrunStructure': {
'build_time': 0,
'built_from': [],
'display_name': 'Dakrun\'s Den',
'race': 'Zerg',
'type': 'Building',
'is_morph': False,
},
'DehakaNydusDestroyer': {
'build_time': 0,
'built_from': [],
'display_name': 'Primal Wurm',
'race': 'Zerg',
'type': 'Building',
'is_morph': False,
},
# Upgrades
'DehakaPrimalWeaponsLevel1': {
'build_time': 160,
'built_from': ['DehakaHatchery'],
'display_name': 'Primal Attacks Level 1',
'race': | |
can register more hooks. Applications should take care to
avoid creating infinite loops by recursively registering
hooks.
Hooks are called only for a top-level commit. A savepoint
creation does not call any hooks. If the transaction is
aborted, hooks are not called, and are discarded. Calling a
hook "consumes" its registration too: hook registrations do
not persist across transactions. If it's desired to call the
same hook on every transaction commit, then
`addBeforeCommitHook` must be called with that hook during
every transaction; in such a case consider registering a
synchronizer object via `ITransactionManager.registerSynch`
instead.
"""
def getBeforeCommitHooks():
    """Return iterable producing the registered `addBeforeCommitHook` hooks.
    A triple ``(hook, args, kws)`` is produced for each registered hook.
    The hooks are produced in the order in which they would be invoked
    by a top-level transaction commit.
    Note that commit consumes registrations (see `addBeforeCommitHook`),
    so hooks listed here are those not yet called in this transaction.
    """
def addAfterCommitHook(hook, args=(), kws=None):
    """Register a hook to call after a transaction commit attempt.
    The specified hook function will be called after the
    transaction commit succeeds or aborts. The first argument
    passed to the hook is a Boolean value, `True` if the commit
    succeeded, or `False` if the commit aborted (the status Boolean
    precedes any *args*).
    *args* and *kws* are interpreted as for `addBeforeCommitHook`
    (with the exception that there is always one positional
    argument, the commit status).
    As with `addBeforeCommitHook`, multiple hooks can be
    registered, savepoint creation doesn't call any hooks, and
    calling a hook consumes its registration.
    """
def getAfterCommitHooks():
    """Return iterable producing the registered `addAfterCommitHook` hooks.
    As with `getBeforeCommitHooks`, a triple ``(hook, args, kws)``
    is produced for each registered hook. The hooks are produced
    in the order in which they would be invoked by a top-level
    transaction commit. See `addAfterCommitHook` for registration
    and consumption semantics.
    """
def addBeforeAbortHook(hook, args=(), kws=None):
    """Register a hook to call before the transaction is aborted.
    The specified hook function will be called after the
    transaction's abort method has been called, but before the
    abort process has been started.
    *args* and *kws* are interpreted as for `addBeforeCommitHook`.
    As with `addBeforeCommitHook`, multiple hooks can be
    registered, savepoint creation doesn't call any hooks, and
    calling a hook consumes its registration.
    Abort hooks are called only for a top-level abort. If the
    transaction is committed, abort hooks are not called. This is
    true even if the commit fails. In this case, however, the
    transaction is in the ``COMMITFAILED`` state and is virtually
    unusable; therefore, a top-level abort will typically follow.
    """
def getBeforeAbortHooks():
    """Return iterable producing the registered `addBeforeAbortHook` hooks.
    As with `getBeforeCommitHooks`, a triple ``(hook, args, kws)``
    is produced for each registered hook. The hooks are produced
    in the order in which they would be invoked by a top-level
    transaction abort. See `addBeforeAbortHook` for registration
    and consumption semantics.
    """
def addAfterAbortHook(hook, args=(), kws=None):
    """Register a hook to call after a transaction abort.
    The specified hook function will be called after the
    transaction abort completes.
    *args* and *kws* are interpreted as for `addBeforeCommitHook`.
    As with `addBeforeCommitHook`, multiple hooks can be
    registered, savepoint creation doesn't call any hooks, and
    calling a hook consumes its registration.
    As with `addBeforeAbortHook`, these hooks are called only for
    a top-level abort. See that method for more.
    """
def getAfterAbortHooks():
    """Return iterable producing the registered `addAfterAbortHook` hooks.
    As with `getBeforeCommitHooks`, a triple ``(hook, args, kws)``
    is produced for each registered hook. The hooks are produced
    in the order in which they would be invoked by a top-level
    transaction abort. See `addAfterAbortHook` for registration
    and consumption semantics.
    """
def set_data(ob, data):
    """Hold *data* on behalf of an object
    For objects such as data managers or their subobjects that
    work with multiple transactions, it's convenient to store
    transaction-specific data on the transaction itself. The
    transaction knows nothing about the data, but simply holds it
    on behalf of the object.
    The object passed should be the object that needs the data, as
    opposed to a simple object like a string. (Internally, the id of
    the object is used as the key.)
    NOTE(review): because the id is the key, the caller should keep
    a reference to *ob* alive while the data must remain retrievable;
    id values can be reused once an object is garbage collected —
    confirm against the implementation.
    """
def data(ob):
    """Retrieve data held on behalf of an object.
    *ob* must be the same object previously passed to `set_data`
    (lookup is by object id). See `set_data`.
    """
def isRetryableError(error):
    """Determine if the error is retryable.
    Returns true if any joined `IRetryDataManager` considers the
    error transient *or* if the error is an instance of
    `TransientError`. Such errors may occur due to concurrency
    issues in the underlying storage engine; callers may choose to
    re-run the transaction when this returns true.
    """
class IDataManager(Interface):
    """Objects that manage transactional storage.
    These objects may manage data for other objects, or they may manage
    non-object storages, such as relational databases. For example,
    a `ZODB.Connection.Connection`.
    Note that when some data is modified, that data's data manager should
    join a transaction so that data can be committed when the user commits
    the transaction.
    These objects implement the two-phase commit protocol in order to allow
    multiple data managers to safely participate in a single transaction.
    The methods `tpc_begin`, `commit`, `tpc_vote`, and then either
    `tpc_finish` or `tpc_abort` are normally called in that order when
    committing a transaction.
    """
    transaction_manager = Attribute(
        """The transaction manager (TM) used by this data manager.
        This is a public attribute, intended for read-only use. The value
        is an instance of `ITransactionManager`, typically set by the data
        manager's constructor.
        """)
    def abort(transaction):
        """Abort a transaction and forget all changes.
        Abort must be called outside of a two-phase commit.
        Abort is called by the transaction manager to abort
        transactions that are not yet in a two-phase commit. It may
        also be called when rolling back a savepoint made before the
        data manager joined the transaction.
        In any case, after abort is called, the data manager is no
        longer participating in the transaction. If there are new
        changes, the data manager must rejoin the transaction.
        """
    def tpc_begin(transaction):
        """Begin commit of a transaction, starting the two-phase commit.
        *transaction* is the `ITransaction` instance associated with the
        transaction being committed.
        """
    def commit(transaction):
        """Commit modifications to registered objects.
        Save changes to be made persistent if the transaction commits
        (if `tpc_finish` is called later). If `tpc_abort` is called
        later, changes must not persist.
        This includes conflict detection and handling. If no conflicts
        or errors occur, the data manager should be prepared to make
        the changes persist when `tpc_finish` is called.
        """
    def tpc_vote(transaction):
        """Verify that a data manager can commit the transaction.
        This is the last chance for a data manager to vote 'no'. A
        data manager votes 'no' by raising an exception.
        *transaction* is the `ITransaction` instance associated with the
        transaction being committed.
        """
    def tpc_finish(transaction):
        """Indicate confirmation that the transaction is done.
        Make all changes to objects modified by this transaction persist.
        *transaction* is the `ITransaction` instance associated with the
        transaction being committed.
        This should never fail. If this raises an exception, the
        database is not expected to maintain consistency; it's a
        serious error.
        """
    def tpc_abort(transaction):
        """Abort a transaction.
        This is called by a transaction manager to end a two-phase commit on
        the data manager. Abandon all changes to objects modified by this
        transaction.
        *transaction* is the `ITransaction` instance associated with the
        transaction being committed.
        This should never fail.
        """
    def sortKey():
        """Return a key to use for ordering registered `IDataManagers`.
        In order to guarantee a total ordering, keys **must** be
        `strings <str>`.
        Transactions use a global sort order to prevent deadlock when
        committing transactions involving multiple data managers.
        The data managers **must** define a `sortKey` method that
        provides a global ordering across all registered data managers.
        """
    # Alternate version:
    # """Return a consistent sort key for this connection.
    #
    # This allows ordering multiple connections that use the same storage
    # in a consistent manner. This is unique for the lifetime of a
    # connection, which is good enough to avoid ZEO deadlocks.
    # """
class ISavepointDataManager(IDataManager):
    """A data manager that is additionally able to create savepoints."""
    def savepoint():
        """Return a data-manager savepoint (`IDataManagerSavepoint`)."""
class IRetryDataManager(IDataManager):
def should_retry(exception):
"""Return whether a given exception instance should be retried.
A data manager can provide this method to indicate that a a
transaction that raised the given error should be retried.
This method may be called by an `ITransactionManager` when
considering whether to retry a | |
col_width_dict=col_width_dict,
)
# review_api.review_edges = review_edges
return review_api
def get_match_status(ibs, aid_pair):
    """ Data role for status column """
    aid1, aid2 = aid_pair
    # Each member of the pair must be a scalar annotation id.
    for aid in (aid1, aid2):
        assert not ut.isiterable(aid), 'aid1=%r, aid2=%r' % (aid1, aid2)
    text = ibs.get_match_text(aid1, aid2)
    if text is None:
        raise AssertionError('impossible state id_review_api')
    return text
def get_reviewed_status(ibs, aid_pair):
    """ Data role for status column

    Maps the pair's review state to a human-readable label. State 0 is
    treated as unreviewed, consistent with `get_reviewed_status_bgrole`,
    which lightens the color for both 0 and None.
    """
    aid1, aid2 = aid_pair
    assert not ut.isiterable(aid1), 'aid1=%r, aid2=%r' % (aid1, aid2)
    assert not ut.isiterable(aid2), 'aid1=%r, aid2=%r' % (aid1, aid2)
    # FIXME: use new api
    state = ibs.get_annot_pair_is_reviewed([aid1], [aid2])[0]
    state_to_text = {
        None: 'Unreviewed',
        0: 'Unreviewed',  # consistency fix: sibling bgrole treats 0 as unreviewed
        2: 'Auto-reviewed',
        1: 'User-reviewed',
    }
    # Unknown states still surface loudly rather than being hidden.
    default = '??? unknown mode %r' % (state,)
    text = state_to_text.get(state, default)
    return text
def get_match_status_bgrole(ibs, aid_pair):
    """ Background role for status column """
    aid1, aid2 = aid_pair
    # Color encodes the match truth; lightened for readability as a cell bg.
    truth = ibs.get_match_truth(aid1, aid2)
    return vh.get_truth_color(truth, base255=True, lighten_amount=0.35)
def get_reviewed_status_bgrole(ibs, aid_pair):
    """ Background role for status column

    The hue encodes match truth; the lightness encodes review progress
    (lighter means less reviewed).
    """
    aid1, aid2 = aid_pair
    truth = ibs.get_match_truth(aid1, aid2)
    reviewed = ibs.get_annot_pair_is_reviewed([aid1], [aid2])[0]
    if reviewed == 0 or reviewed is None:
        lighten = 0.9   # unreviewed: very light
    elif reviewed == 2:
        lighten = 0.7   # auto-reviewed: medium
    else:
        lighten = 0.35  # user-reviewed: full strength
    return vh.get_truth_color(truth, base255=True, lighten_amount=lighten)
def get_match_thumb_fname(
    cm, daid, qreq_, view_orientation='vertical', draw_matches=True, draw_heatmask=False
):
    """
    Build the cache filename for a rendered query/database match thumbnail.

    The name encodes the (qaid, daid) pair, a hash of the query request
    config, and the rendering options, so distinct renderings get
    distinct cache files.

    CommandLine:
        python -m wbia.gui.id_review_api --exec-get_match_thumb_fname

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.gui.id_review_api import * # NOQA
        >>> import wbia
        >>> cm, qreq_ = wbia.testdata_cm('PZ_MTEST')
        >>> thumbsize = (128, 128)
        >>> daid = cm.get_top_aids()[0]
        >>> match_thumb_fname = get_match_thumb_fname(cm, daid, qreq_)
        >>> print(match_thumb_fname)
    """
    # Hash of the full query config keeps names stable per pipeline config.
    config_hash = ut.hashstr27(qreq_.get_cfgstr())
    fmt = 'match_aids=%d,%d_cfgstr=%s_draw=%s_mask=%s_orientation=%s.jpg'
    return fmt % (
        cm.qaid,
        daid,
        config_hash,
        draw_matches,
        draw_heatmask,
        view_orientation,
    )
def ensure_match_img(ibs, cm, daid, qreq_=None, match_thumbtup_cache={}):
    r"""Return the path of a rendered match image, drawing it on a cache miss.

    NOTE: the mutable default for ``match_thumbtup_cache`` is deliberate — it
    acts as a process-wide memoization cache shared across calls that do not
    pass their own dictionary.

    CommandLine:
        python -m wbia.gui.id_review_api --test-ensure_match_img --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.gui.id_review_api import *  # NOQA
        >>> import wbia
        >>> # build test data
        >>> cm, qreq_ = wbia.testdata_cm()
        >>> daid = cm.get_top_aids()[0]
        >>> match_thumbtup_cache = {}
        >>> # execute function
        >>> match_thumb_fpath_ = ensure_match_img(qreq_.ibs, cm, daid, qreq_,
        >>>                                       match_thumbtup_cache)
        >>> # verify results
        >>> result = str(match_thumb_fpath_)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> ut.startfile(match_thumb_fpath_, quote=True)
    """
    match_thumbdir = ibs.get_match_thumbdir()
    match_thumb_fname = get_match_thumb_fname(cm, daid, qreq_)
    match_thumb_fpath_ = ut.unixjoin(match_thumbdir, match_thumb_fname)
    try:
        # Cache hit: the image was already rendered during this process.
        return match_thumbtup_cache[match_thumb_fpath_]
    except KeyError:
        pass
    # Cache miss: render the match visualization with matplotlib and save it.
    # TODO: just draw the image at the correct thumbnail size
    # TODO: draw without matplotlib?
    fpath = cm.imwrite_single_annotmatch(
        qreq_,
        daid,
        fpath=match_thumb_fpath_,
        saveax=True,
        fnum=32,
        notitle=True,
        verbose=False,
    )
    match_thumbtup_cache[match_thumb_fpath_] = fpath
    return fpath
def make_ensure_match_img_nosql_func(qreq_, cm, daid):
r"""
CommandLine:
python -m wbia.gui.id_review_api --test-ensure_match_img --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.gui.id_review_api import * # NOQA
>>> import wbia
>>> # build test data
>>> cm, qreq_ = wbia.testdata_cm()
>>> ibs = qreq_.ibs
>>> daid = cm.get_top_aids()[0]
>>> match_thumbtup_cache = {}
>>> # execute function
>>> match_thumb_fpath_ = ensure_match_img(qreq_.ibs, cm, daid, qreq_, match_thumbtup_cache)
>>> # verify results
>>> result = str(match_thumb_fpath_)
>>> print(result)
>>> ut.quit_if_noshow()
>>> ut.startfile(match_thumb_fpath_, quote=True)
"""
# import wbia.viz
from wbia.viz import viz_matches
import cv2
import io
import wbia.plottool as pt
import vtool as vt
import matplotlib as mpl
if cm.__class__.__name__ == 'PairwiseMatch':
# HACK DO THIS THE VTOOL WAY
match = cm
ibs = qreq_ # VERY HACK
match_thumbdir = ibs.get_match_thumbdir()
cfgstr = hash(match.config) # HACK only works if config is already a hashdict
match_thumb_fname = 'tmpmatch-%d-%d-%s.jpg' % (
match.annot1['aid'],
match.annot2['aid'],
cfgstr,
)
fpath = ut.unixjoin(match_thumbdir, match_thumb_fname)
def main_thread_load2():
rchip1, kpts1 = ut.dict_take(match.annot1, ['rchip', 'kpts'])
rchip2, kpts2 = ut.dict_take(match.annot2, ['rchip', 'kpts'])
return (match,)
def nosql_draw2(check_func, match):
from matplotlib.backends.backend_agg import FigureCanvas
try:
from matplotlib.backends.backend_agg import Figure
except ImportError:
from matplotlib.figure import Figure
was_interactive = mpl.is_interactive()
if was_interactive:
mpl.interactive(False)
# fnum = 32
fig = Figure()
canvas = FigureCanvas(fig) # NOQA
# fig.clf()
ax = fig.add_subplot(1, 1, 1)
if check_func is not None and check_func():
return
ax, xywh1, xywh2 = match.show(ax=ax)
if check_func is not None and check_func():
return
savekw = {
# 'dpi' : 60,
'dpi': 80,
}
axes_extents = pt.extract_axes_extents(fig)
# assert len(axes_extents) == 1, 'more than one axes'
extent = axes_extents[0]
with io.BytesIO() as stream:
# This call takes 23% - 15% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
stream.seek(0)
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
if check_func is not None and check_func():
return
pt.plt.close(fig)
image = cv2.imdecode(data, 1)
thumbsize = 221
max_dsize = (thumbsize, thumbsize)
dsize, sx, sy = vt.resized_clamped_thumb_dims(vt.get_size(image), max_dsize)
if check_func is not None and check_func():
return
image = vt.resize(image, dsize)
vt.imwrite(fpath, image)
if check_func is not None and check_func():
return
# fig.savefig(fpath, bbox_inches=extent, **savekw)
# match_thumbtup_cache[match_thumb_fpath_] = fpath
return fpath, nosql_draw2, main_thread_load2
aid1 = cm.qaid
aid2 = daid
ibs = qreq_.ibs
resize_factor = 0.5
match_thumbdir = ibs.get_match_thumbdir()
match_thumb_fname = get_match_thumb_fname(cm, daid, qreq_)
fpath = ut.unixjoin(match_thumbdir, match_thumb_fname)
def main_thread_load():
# This gets executed in the main thread and collects data
# from sql
rchip1_fpath, rchip2_fpath, kpts1, kpts2 = viz_matches._get_annot_pair_info(
ibs, aid1, aid2, qreq_, draw_fmatches=True, as_fpath=True
)
return rchip1_fpath, rchip2_fpath, kpts1, kpts2
def nosql_draw(check_func, rchip1_fpath, rchip2_fpath, kpts1, kpts2):
# This gets executed in the child thread and does drawing async style
# from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
# from matplotlib.backends.backend_pdf import Figure
# from matplotlib.backends.backend_svg import FigureCanvas
# from matplotlib.backends.backend_svg import Figure
from matplotlib.backends.backend_agg import FigureCanvas
try:
from matplotlib.backends.backend_agg import Figure
except ImportError:
from matplotlib.figure import Figure
kpts1_ = vt.offset_kpts(kpts1, (0, 0), (resize_factor, resize_factor))
kpts2_ = vt.offset_kpts(kpts2, (0, 0), (resize_factor, resize_factor))
# from matplotlib.figure import Figure
if check_func is not None and check_func():
return
rchip1 = vt.imread(rchip1_fpath)
rchip1 = vt.resize_image_by_scale(rchip1, resize_factor)
if check_func is not None and check_func():
return
rchip2 = vt.imread(rchip2_fpath)
rchip2 = vt.resize_image_by_scale(rchip2, resize_factor)
if check_func is not None and check_func():
return
try:
idx = cm.daid2_idx[daid]
fm = cm.fm_list[idx]
fsv = None if cm.fsv_list is None else cm.fsv_list[idx]
fs = None if fsv is None else fsv.prod(axis=1)
except KeyError:
fm = []
fs = None
fsv = None
maxnum = 200
if fs is not None and len(fs) > maxnum:
# HACK TO ONLY SHOW TOP MATCHES
sortx = fs.argsort()[::-1]
fm = fm.take(sortx[:maxnum], axis=0)
fs = fs.take(sortx[:maxnum], axis=0)
was_interactive = mpl.is_interactive()
if was_interactive:
mpl.interactive(False)
# fnum = 32
fig = Figure()
canvas = FigureCanvas(fig) # NOQA
# fig.clf()
ax = fig.add_subplot(1, 1, 1)
if check_func is not None and check_func():
return
# fig = pt.plt.figure(fnum)
# H1 = np.eye(3)
# H2 = np.eye(3)
# H1[0, 0] = .5
# H1[1, 1] = .5
# H2[0, 0] = .5
# H2[1, 1] = .5
ax, xywh1, xywh2 = pt.show_chipmatch2(
rchip1, rchip2, kpts1_, kpts2_, fm, fs=fs, colorbar_=False, ax=ax
)
if check_func is not None and check_func():
return
savekw = {
# 'dpi' : 60,
'dpi': 80,
}
axes_extents = pt.extract_axes_extents(fig)
# assert len(axes_extents) == 1, 'more than one axes'
extent = axes_extents[0]
with io.BytesIO() as stream:
# This call takes 23% - 15% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
stream.seek(0)
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
if check_func is not None and check_func():
return
pt.plt.close(fig)
image = cv2.imdecode(data, 1)
thumbsize = 221
max_dsize = (thumbsize, thumbsize)
dsize, sx, sy = vt.resized_clamped_thumb_dims(vt.get_size(image), max_dsize)
if check_func is | |
'Error: Undetermined'
raise FieldTypeInDevError(fname)
if self.flds.shape[0] == 1:
self.im(' - Scalar field detected')
self.fieldNature = 'Scalar'
else:
self.im(' - Vectorial field detected: '+str(self.flds.shape[0])+' fields')
self.fieldNature ='Vectorial'
self.im(' -> '+self.fieldType)
self.im('Reading and resampling operations done!')
def stag2VTU(self,fname=None,path='./',ASCII=False,verbose=True):
""" Extension of the stagVTK package, directly available on stagData !
This function creat '.vtu' or 'xdmf/h5' file readable with Paraview to efficiently
visualize 3D data contain in a stagData object. This function works directly
on non overlapping stagData object.
Note also that the internal field stagData.slayers of the stagData object
must be filled.
<i> : fname = str, name of the exported file without any extention
path = str, path where you want to export your new .vtu file.
[Default: path='./']
ASCII = bool, if True, the .vtu file will be write in ASCII mode
if not, in binary mode. [Default, ASCII=True]
"""
self.im('Requested: Build VTU from StagData object')
if self.geometry == 'cart2D' or self.geometry == 'annulus':
raise VisuGridGeometryError(self.geometry,'cart3D or yy')
if fname == None:
import time
(y,m,d,h,mins,secs,bin1,bin2,bin3) = time.localtime()
fname = self.fname+'_'+str(d)+'-'+str(m)+'-'+str(y)+'_'+str(h)+'-'+str(mins)+'-'+str(secs)
self.im('Automatic file name attribution: '+fname)
#Importation of the stagVTK package
from .stagVTK import stag2VTU
stag2VTU(fname,self,path,ASCII=ASCII,verbose=verbose)
class StagCartesianGeometry(MainStagObject):
    """
    Defines the StagCartesianGeometry object, derived from MainStagObject.
    This object is conditionally inherited in StagData and handles the
    'cart2D' and 'cart3D' grid geometries.
    """
    def __init__(self,geometry):
        """geometry: str, expected 'cart2D' or 'cart3D' (selects the 2D/3D log messages)."""
        super().__init__() # inherit all the methods and properties from MainStagObject
        self.geometry = geometry
        # ----- Cartesian 2D and 3D geometries ----- #
        self.XYZind = [] #Matrix of good index after the mesh operation
        self.x = [] #Matrix of X coordinates meshed
        self.y = [] #Matrix of Y coordinates meshed
        self.z = [] #Matrix of Z coordinates meshed
        self.v = [] #Matrix of scalar field (or norm of velocity)
        self.vx = [] #Matrix of x-component of the velocity field for Cartesian grids
        self.vy = [] #Matrix of y-component of the velocity field for Cartesian grids
        self.vz = [] #Matrix of z-component of the velocity field for Cartesian grids
        self.P = [] #Matrix of Pressure field for Cartesian grids
    def stagProcessing(self):
        """
        Processes stag data according to a Cartesian geometry.

        Meshes the 1D coordinate vectors into full grid matrices, builds an
        index mask (XYZind) to keep only the selected grid points, then
        reshapes the raw field `self.flds` into grid-shaped arrays:
        `self.v` for a scalar field, or (vx, vy, vz, P, plus the norm v)
        for a vectorial field.
        """
        self.im('Processing stag Data:')
        self.im('  - Grid Geometry')
        if self.geometry == 'cart2D':
            self.im('      - 2D cartesian grid geometry')
        else:
            self.im('      - 3D cartesian grid geometry')
        # Mesh the 1D coordinate vectors into full 3D grid matrices
        (self.x,self.y,self.z) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords,indexing='ij')
        #Same operation but on index matrix:
        (Xind,Yind,Zind) = np.meshgrid(self.xind,self.yind,self.zind, indexing='ij')
        Xind = Xind.reshape(Xind.shape[0]*Xind.shape[1]*Xind.shape[2])
        Yind = Yind.reshape(Yind.shape[0]*Yind.shape[1]*Yind.shape[2])
        Zind = Zind.reshape(Zind.shape[0]*Zind.shape[1]*Zind.shape[2])
        # A point is kept only when all three of its index flags are non-zero
        self.XYZind = np.multiply(np.multiply(Xind,Yind),Zind)
        # Application of redFlag on index matrix:
        goodIndex = np.array(range(self.nx0*self.ny0*self.nz0))
        goodIndex = goodIndex[np.array(self.XYZind,dtype=bool)]
        #Processing of the field according to its scalar or vectorial nature:
        if self.fieldNature == 'Scalar':
            self.im('  - Build data grid for scalar field')
            # 'nts' holds the total grid dimensions — assumed (Nx, Ny, Nz); TODO confirm against header writer
            (Nx, Ny, Nz) = self.header.get('nts')
            V = self.flds[0,:,:,:,0].reshape(Nx*Ny*Nz)
            self.v = V[goodIndex].reshape(self.nx,self.ny,self.nz)
            #Creation of empty vectorial fields arrays:
            self.vx = np.array(self.vx)
            self.vy = np.array(self.vy)
            self.vz = np.array(self.vz)
            self.P = np.array(self.P)
        elif self.fieldNature == 'Vectorial':
            self.im('  - Build data grid for vectorial field')
            (Nx, Ny, Nz) = self.header.get('nts')
            temp_vx = self.flds[0][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
            temp_vy = self.flds[1][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
            temp_vz = self.flds[2][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
            temp_P = self.flds[3][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
            self.vx = temp_vx[goodIndex].reshape(self.nx,self.ny,self.nz)
            self.vy = temp_vy[goodIndex].reshape(self.nx,self.ny,self.nz)
            self.vz = temp_vz[goodIndex].reshape(self.nx,self.ny,self.nz)
            self.P = temp_P[goodIndex].reshape(self.nx,self.ny,self.nz)
            self.v = np.sqrt(self.vx**2+self.vy**2+self.vz**2) #the norm
        # == Processing Finish !
        self.im('Processing of stag data done!')
class StagYinYangGeometry(MainStagObject):
"""
Secondary geom class
"""
def __init__(self):
super().__init__() # inherit all the methods and properties from MainStagObject
self.geometry = 'yy'
# ----- Yin Yang geometry ----- #
self.X = [] #Matrix of X coordinates meshed
self.Y = [] #Matrix of Y coordinates meshed
self.Z = [] #Matrix of Z coordinates meshed
self.layers = [] #matrix of layer's index meshed
self.XYZind = [] #Matrix of good index after the mesh operation
self.r1 = [] #Matrix of the radius of points for Yin grid
self.r2 = [] #Matrix of the radius of points for Yang grid
self.x1_overlap = []#Yin grid x matrix - overlapping grids:
self.y1_overlap = []#Yin grid y matrix
self.z1_overlap = []#Yin grid z matrix
self.x2_overlap = []#Yang grid x matrix
self.y2_overlap = []#Yang grid y matrix
self.z2_overlap = []#Yang grid z matrix
self.x1 = [] #Yin grid x matrix - non-overlapping grids:
self.y1 = [] #Yin grid y matrix
self.z1 = [] #Yin grid z matrix
self.x2 = [] #Yang grid x matrix
self.y2 = [] #Yang grid y matrix
self.z2 = [] #Yang grid z matrix
self.r1 = [] #Matrice of spherical coordinates r for the Yin grid
self.theta1 = [] #Matrice of spherical coordinates theta for the Yin grid
self.phi1 = [] #Matrice of spherical coordinates phi for the Yin grid
self.r2 = [] #Matrice of spherical coordinates r for the Yang grid
self.theta2 = [] #Matrice of spherical coordinates theta for the Yang grid
self.phi2 = [] #Matrice of spherical coordinates phi for the Yang grid
self.redFlags = [] #Matrix of wrong index in YY (overlaping pbs)
self.x1_redf = [] #Matrix of redflag x-coordinates for Yin grid
self.y1_redf = [] #Matrix of redflag y-coordinates for Yin grid
self.z1_redf = [] #Matrix of redflag z-coordinates for Yin grid
self.x2_redf = [] #Matrix of redflag x-coordinates for Yang grid
self.y2_redf = [] #Matrix of redflag y-coordinates for Yang grid
self.z2_redf = [] #Matrix of redflag z-coordinates for Yang grid
self.redFlags_layers = [] #Matrix of layer's index meshed for redFlags points
#For scalar field only:
self.v1_overlap = []#Complete Yin field, corresponding to over '_overlap' matrices
self.v2_overlap = []#Complete Yang field, corresponding to over '_overlap' matrices
self.v1 = [] #Matrix of scalar field for the Yin grid (or norm of vectorial on Yin)
self.v2 = [] #Matrix of scalar field for the Yang grid (or norm of vectorial on Yang)
#For vectorial field only:
self.vx1_overlap= [] #Complete vx Yin field, corresponding to over '_overlap' matrices
self.vx2_overlap= [] #Complete vx Yang field, corresponding to over '_overlap' matrices
self.vy1_overlap= [] #Complete vy Yin field
self.vy2_overlap= [] #Complete vy Yang field
self.vz1_overlap= [] #Complete vz Yin field
self.vz2_overlap= [] #Complete vz Yang field
self.P1_overlap = [] #Complete P Yin field
self.P2_overlap = [] #Complete P Yang field
self.vx1 = [] #Matrix of x-component of the vectorial field for the Yin grid
self.vx2 = [] #Matrix of x-component of the vectorial field for the Yang grid
self.vy1 = [] #Matrix of y-component of the vectorial field for the Yin grid
self.vy2 = [] #Matrix of y-component of the vectorial field for the Yang grid
self.vz1 = [] #Matrix of z-component of the vectorial field for the Yin grid
self.vz2 = [] #Matrix of z-component of the vectorial field for the Yang grid
self.P1 = [] #Matrix of the Pressure field for the Yin grid
self.P2 = [] #Matrix of the Pressure field for the Yang grid
self.vr1 = [] #Matrix of radial component of the vectorial field for the Yin grid
self.vtheta1 = [] #Matrix of theta component of the vectorial field for the Yin grid
self.vphi1 = [] #Matrix of phi component of the vectorial field for the Yin grid
self.vr2 = [] #Matrix of radial component of the vectorial field for the Yang grid
self.vtheta2 = [] #Matrix of theta component of the vectorial field for the Yang grid
self.vphi2 = [] #Matrix of phi component of the vectorial field for the Yang grid
def stagProcessing(self, build_redflag_point=False, build_overlapping_field=False):
""" This function process stag data according to a YinYang geometry.
If build_redflag_point == True, build coordinates matrices of the
redflag points and fills fields x-y-z_redf
If build_overlapping_field == True, build ghost points on YY corner"""
self.im('Processing stag Data:')
self.im(' - Grid Geometry')
self.im(' - Yin-Yang grid geometry')
self.im(' - Preprocessing of coordinates matrices')
(self.X,self.Y,self.Z) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords, indexing='ij')
self.X = self.X.reshape(self.X.shape[0]*self.X.shape[1]*self.X.shape[2])
self.Y = self.Y.reshape(self.Y.shape[0]*self.Y.shape[1]*self.Y.shape[2])
self.Z = self.Z.reshape(self.Z.shape[0]*self.Z.shape[1]*self.Z.shape[2])
#Same operation but on layers matrix:
(self.bin, self.BIN, self.layers) = np.meshgrid(self.x_coords,self.y_coords,self.slayers, indexing='ij')
self.layers = self.layers.reshape(self.layers.shape[0]*self.layers.shape[1]*self.layers.shape[2])
self.bin = None
self.BIN = None
#Same operation but on index matrix:
(Xind,Yind,Zind) = np.meshgrid(self.xind,self.yind,self.zind, indexing='ij')
Xind = Xind.reshape(Xind.shape[0]*Xind.shape[1]*Xind.shape[2])
Yind = Yind.reshape(Yind.shape[0]*Yind.shape[1]*Yind.shape[2])
Zind = Zind.reshape(Zind.shape[0]*Zind.shape[1]*Zind.shape[2])
self.XYZind | |
GL/glext.h:2014
GL_UNSIGNED_IDENTITY_NV = 34102 # GL/glext.h:2015
GL_UNSIGNED_INVERT_NV = 34103 # GL/glext.h:2016
GL_EXPAND_NORMAL_NV = 34104 # GL/glext.h:2017
GL_EXPAND_NEGATE_NV = 34105 # GL/glext.h:2018
GL_HALF_BIAS_NORMAL_NV = 34106 # GL/glext.h:2019
GL_HALF_BIAS_NEGATE_NV = 34107 # GL/glext.h:2020
GL_SIGNED_IDENTITY_NV = 34108 # GL/glext.h:2021
GL_SIGNED_NEGATE_NV = 34109 # GL/glext.h:2022
GL_SCALE_BY_TWO_NV = 34110 # GL/glext.h:2023
GL_SCALE_BY_FOUR_NV = 34111 # GL/glext.h:2024
GL_SCALE_BY_ONE_HALF_NV = 34112 # GL/glext.h:2025
GL_BIAS_BY_NEGATIVE_ONE_HALF_NV = 34113 # GL/glext.h:2026
GL_COMBINER_INPUT_NV = 34114 # GL/glext.h:2027
GL_COMBINER_MAPPING_NV = 34115 # GL/glext.h:2028
GL_COMBINER_COMPONENT_USAGE_NV = 34116 # GL/glext.h:2029
GL_COMBINER_AB_DOT_PRODUCT_NV = 34117 # GL/glext.h:2030
GL_COMBINER_CD_DOT_PRODUCT_NV = 34118 # GL/glext.h:2031
GL_COMBINER_MUX_SUM_NV = 34119 # GL/glext.h:2032
GL_COMBINER_SCALE_NV = 34120 # GL/glext.h:2033
GL_COMBINER_BIAS_NV = 34121 # GL/glext.h:2034
GL_COMBINER_AB_OUTPUT_NV = 34122 # GL/glext.h:2035
GL_COMBINER_CD_OUTPUT_NV = 34123 # GL/glext.h:2036
GL_COMBINER_SUM_OUTPUT_NV = 34124 # GL/glext.h:2037
GL_MAX_GENERAL_COMBINERS_NV = 34125 # GL/glext.h:2038
GL_NUM_GENERAL_COMBINERS_NV = 34126 # GL/glext.h:2039
GL_COLOR_SUM_CLAMP_NV = 34127 # GL/glext.h:2040
GL_COMBINER0_NV = 34128 # GL/glext.h:2041
GL_COMBINER1_NV = 34129 # GL/glext.h:2042
GL_COMBINER2_NV = 34130 # GL/glext.h:2043
GL_COMBINER3_NV = 34131 # GL/glext.h:2044
GL_COMBINER4_NV = 34132 # GL/glext.h:2045
GL_COMBINER5_NV = 34133 # GL/glext.h:2046
GL_COMBINER6_NV = 34134 # GL/glext.h:2047
GL_COMBINER7_NV = 34135 # GL/glext.h:2048
# NV_fog_distance (GL/glext.h:2056)
GL_FOG_DISTANCE_MODE_NV = 34138 # GL/glext.h:2057
GL_EYE_RADIAL_NV = 34139 # GL/glext.h:2058
GL_EYE_PLANE_ABSOLUTE_NV = 34140 # GL/glext.h:2059
# NV_texgen_emboss (GL/glext.h:2063)
GL_EMBOSS_LIGHT_NV = 34141 # GL/glext.h:2064
GL_EMBOSS_CONSTANT_NV = 34142 # GL/glext.h:2065
GL_EMBOSS_MAP_NV = 34143 # GL/glext.h:2066
# NV_blend_square (GL/glext.h:2069)
# NV_texture_env_combine4 (GL/glext.h:2072)
GL_COMBINE4_NV = 34051 # GL/glext.h:2073
GL_SOURCE3_RGB_NV = 34179 # GL/glext.h:2074
GL_SOURCE3_ALPHA_NV = 34187 # GL/glext.h:2075
GL_OPERAND3_RGB_NV = 34195 # GL/glext.h:2076
GL_OPERAND3_ALPHA_NV = 34203 # GL/glext.h:2077
# MESA_resize_buffers (GL/glext.h:2080)
# MESA_window_pos (GL/glext.h:2083)
# EXT_texture_compression_s3tc (GL/glext.h:2086)
GL_COMPRESSED_RGB_S3TC_DXT1_EXT = 33776 # GL/glext.h:2087
GL_COMPRESSED_RGBA_S3TC_DXT1_EXT = 33777 # GL/glext.h:2088
GL_COMPRESSED_RGBA_S3TC_DXT3_EXT = 33778 # GL/glext.h:2089
GL_COMPRESSED_RGBA_S3TC_DXT5_EXT = 33779 # GL/glext.h:2090
# IBM_cull_vertex (GL/glext.h:2093)
GL_CULL_VERTEX_IBM = 103050 # GL/glext.h:2094
# IBM_multimode_draw_arrays (GL/glext.h:2097)
# IBM_vertex_array_lists (GL/glext.h:2100)
GL_VERTEX_ARRAY_LIST_IBM = 103070 # GL/glext.h:2101
GL_NORMAL_ARRAY_LIST_IBM = 103071 # GL/glext.h:2102
GL_COLOR_ARRAY_LIST_IBM = 103072 # GL/glext.h:2103
GL_INDEX_ARRAY_LIST_IBM = 103073 # GL/glext.h:2104
GL_TEXTURE_COORD_ARRAY_LIST_IBM = 103074 # GL/glext.h:2105
GL_EDGE_FLAG_ARRAY_LIST_IBM = 103075 # GL/glext.h:2106
GL_FOG_COORDINATE_ARRAY_LIST_IBM = 103076 # GL/glext.h:2107
GL_SECONDARY_COLOR_ARRAY_LIST_IBM = 103077 # GL/glext.h:2108
GL_VERTEX_ARRAY_LIST_STRIDE_IBM = 103080 # GL/glext.h:2109
GL_NORMAL_ARRAY_LIST_STRIDE_IBM = 103081 # GL/glext.h:2110
GL_COLOR_ARRAY_LIST_STRIDE_IBM = 103082 # GL/glext.h:2111
GL_INDEX_ARRAY_LIST_STRIDE_IBM = 103083 # GL/glext.h:2112
GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM = 103084 # GL/glext.h:2113
GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM = 103085 # GL/glext.h:2114
GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM = 103086 # GL/glext.h:2115
GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM = 103087 # GL/glext.h:2116
# SGIX_subsample (GL/glext.h:2119)
GL_PACK_SUBSAMPLE_RATE_SGIX = 34208 # GL/glext.h:2120
GL_UNPACK_SUBSAMPLE_RATE_SGIX = 34209 # GL/glext.h:2121
GL_PIXEL_SUBSAMPLE_4444_SGIX = 34210 # GL/glext.h:2122
GL_PIXEL_SUBSAMPLE_2424_SGIX = 34211 # GL/glext.h:2123
GL_PIXEL_SUBSAMPLE_4242_SGIX = 34212 # GL/glext.h:2124
# SGIX_ycrcb_subsample (GL/glext.h:2127)
# SGIX_ycrcba (GL/glext.h:2130)
GL_YCRCB_SGIX = 33560 # GL/glext.h:2131
GL_YCRCBA_SGIX = 33561 # GL/glext.h:2132
# SGI_depth_pass_instrument (GL/glext.h:2135)
GL_DEPTH_PASS_INSTRUMENT_SGIX = 33552 # GL/glext.h:2136
GL_DEPTH_PASS_INSTRUMENT_COUNTERS_SGIX = 33553 # GL/glext.h:2137
GL_DEPTH_PASS_INSTRUMENT_MAX_SGIX = 33554 # GL/glext.h:2138
# 3DFX_texture_compression_FXT1 (GL/glext.h:2141)
GL_COMPRESSED_RGB_FXT1_3DFX = 34480 # GL/glext.h:2142
GL_COMPRESSED_RGBA_FXT1_3DFX = 34481 # GL/glext.h:2143
# 3DFX_multisample (GL/glext.h:2146)
GL_MULTISAMPLE_3DFX = 34482 # GL/glext.h:2147
GL_SAMPLE_BUFFERS_3DFX = 34483 # GL/glext.h:2148
GL_SAMPLES_3DFX = 34484 # GL/glext.h:2149
GL_MULTISAMPLE_BIT_3DFX = 536870912 # GL/glext.h:2150
# 3DFX_tbuffer (GL/glext.h:2153)
# EXT_multisample (GL/glext.h:2156)
GL_MULTISAMPLE_EXT = 32925 # GL/glext.h:2157
GL_SAMPLE_ALPHA_TO_MASK_EXT = 32926 # GL/glext.h:2158
GL_SAMPLE_ALPHA_TO_ONE_EXT = 32927 # GL/glext.h:2159
GL_SAMPLE_MASK_EXT = 32928 # GL/glext.h:2160
GL_1PASS_EXT = 32929 # GL/glext.h:2161
GL_2PASS_0_EXT = 32930 # GL/glext.h:2162
GL_2PASS_1_EXT = 32931 # GL/glext.h:2163
GL_4PASS_0_EXT = 32932 # GL/glext.h:2164
GL_4PASS_1_EXT = 32933 # GL/glext.h:2165
GL_4PASS_2_EXT = 32934 # GL/glext.h:2166
GL_4PASS_3_EXT = 32935 # GL/glext.h:2167
GL_SAMPLE_BUFFERS_EXT = 32936 # GL/glext.h:2168
GL_SAMPLES_EXT = 32937 # GL/glext.h:2169
GL_SAMPLE_MASK_VALUE_EXT = 32938 # GL/glext.h:2170
GL_SAMPLE_MASK_INVERT_EXT = 32939 # GL/glext.h:2171
GL_SAMPLE_PATTERN_EXT = 32940 # GL/glext.h:2172
GL_MULTISAMPLE_BIT_EXT = 536870912 # GL/glext.h:2173
# SGIX_vertex_preclip (GL/glext.h:2176)
GL_VERTEX_PRECLIP_SGIX = 33774 # GL/glext.h:2177
GL_VERTEX_PRECLIP_HINT_SGIX = 33775 # GL/glext.h:2178
# SGIX_convolution_accuracy (GL/glext.h:2181)
GL_CONVOLUTION_HINT_SGIX = 33558 # GL/glext.h:2182
# SGIX_resample (GL/glext.h:2185)
GL_PACK_RESAMPLE_SGIX = 33836 # GL/glext.h:2186
GL_UNPACK_RESAMPLE_SGIX = 33837 # GL/glext.h:2187
GL_RESAMPLE_REPLICATE_SGIX = 33838 # GL/glext.h:2188
GL_RESAMPLE_ZERO_FILL_SGIX = 33839 # GL/glext.h:2189
GL_RESAMPLE_DECIMATE_SGIX = 33840 # GL/glext.h:2190
# SGIS_point_line_texgen (GL/glext.h:2193)
GL_EYE_DISTANCE_TO_POINT_SGIS = 33264 # GL/glext.h:2194
GL_OBJECT_DISTANCE_TO_POINT_SGIS = 33265 # GL/glext.h:2195
GL_EYE_DISTANCE_TO_LINE_SGIS = 33266 # GL/glext.h:2196
GL_OBJECT_DISTANCE_TO_LINE_SGIS = 33267 # GL/glext.h:2197
GL_EYE_POINT_SGIS = 33268 # GL/glext.h:2198
GL_OBJECT_POINT_SGIS = 33269 # GL/glext.h:2199
GL_EYE_LINE_SGIS = 33270 # GL/glext.h:2200
GL_OBJECT_LINE_SGIS = 33271 # GL/glext.h:2201
# SGIS_texture_color_mask (GL/glext.h:2204)
GL_TEXTURE_COLOR_WRITEMASK_SGIS = 33263 # GL/glext.h:2205
# EXT_texture_env_dot3 (GL/glext.h:2208)
GL_DOT3_RGB_EXT = 34624 # GL/glext.h:2209
GL_DOT3_RGBA_EXT = 34625 # GL/glext.h:2210
# ATI_texture_mirror_once (GL/glext.h:2213)
GL_MIRROR_CLAMP_ATI = 34626 # GL/glext.h:2214
GL_MIRROR_CLAMP_TO_EDGE_ATI = 34627 # GL/glext.h:2215
# NV_fence (GL/glext.h:2218)
GL_ALL_COMPLETED_NV = 34034 # GL/glext.h:2219
GL_FENCE_STATUS_NV = 34035 # GL/glext.h:2220
GL_FENCE_CONDITION_NV = 34036 # GL/glext.h:2221
# IBM_texture_mirrored_repeat (GL/glext.h:2224)
GL_MIRRORED_REPEAT_IBM = 33648 # GL/glext.h:2225
# NV_evaluators (GL/glext.h:2228)
GL_EVAL_2D_NV = 34496 # GL/glext.h:2229
GL_EVAL_TRIANGULAR_2D_NV = 34497 # GL/glext.h:2230
GL_MAP_TESSELLATION_NV = 34498 # GL/glext.h:2231
GL_MAP_ATTRIB_U_ORDER_NV = 34499 # GL/glext.h:2232
GL_MAP_ATTRIB_V_ORDER_NV = 34500 # GL/glext.h:2233
GL_EVAL_FRACTIONAL_TESSELLATION_NV = 34501 # GL/glext.h:2234
GL_EVAL_VERTEX_ATTRIB0_NV = 34502 # GL/glext.h:2235
GL_EVAL_VERTEX_ATTRIB1_NV = 34503 # GL/glext.h:2236
GL_EVAL_VERTEX_ATTRIB2_NV = 34504 # GL/glext.h:2237
GL_EVAL_VERTEX_ATTRIB3_NV = 34505 # GL/glext.h:2238
GL_EVAL_VERTEX_ATTRIB4_NV = 34506 # GL/glext.h:2239
GL_EVAL_VERTEX_ATTRIB5_NV = 34507 # GL/glext.h:2240
GL_EVAL_VERTEX_ATTRIB6_NV = 34508 # GL/glext.h:2241
GL_EVAL_VERTEX_ATTRIB7_NV = 34509 # GL/glext.h:2242
GL_EVAL_VERTEX_ATTRIB8_NV = 34510 # GL/glext.h:2243
GL_EVAL_VERTEX_ATTRIB9_NV = 34511 # GL/glext.h:2244
GL_EVAL_VERTEX_ATTRIB10_NV = 34512 # GL/glext.h:2245
GL_EVAL_VERTEX_ATTRIB11_NV = 34513 # GL/glext.h:2246
GL_EVAL_VERTEX_ATTRIB12_NV = 34514 # GL/glext.h:2247
GL_EVAL_VERTEX_ATTRIB13_NV = 34515 # GL/glext.h:2248
GL_EVAL_VERTEX_ATTRIB14_NV = 34516 # GL/glext.h:2249
GL_EVAL_VERTEX_ATTRIB15_NV = 34517 # GL/glext.h:2250
GL_MAX_MAP_TESSELLATION_NV = 34518 # GL/glext.h:2251
GL_MAX_RATIONAL_EVAL_ORDER_NV = 34519 # GL/glext.h:2252
# NV_packed_depth_stencil (GL/glext.h:2255)
GL_DEPTH_STENCIL_NV = 34041 # GL/glext.h:2256
GL_UNSIGNED_INT_24_8_NV = 34042 # GL/glext.h:2257
# NV_register_combiners2 (GL/glext.h:2260)
GL_PER_STAGE_CONSTANTS_NV = 34101 # GL/glext.h:2261
# NV_texture_compression_vtc (GL/glext.h:2264)
# NV_texture_rectangle (GL/glext.h:2267)
GL_TEXTURE_RECTANGLE_NV = 34037 # GL/glext.h:2268
GL_TEXTURE_BINDING_RECTANGLE_NV = 34038 # GL/glext.h:2269
GL_PROXY_TEXTURE_RECTANGLE_NV = 34039 # GL/glext.h:2270
GL_MAX_RECTANGLE_TEXTURE_SIZE_NV = 34040 # GL/glext.h:2271
# NV_texture_shader (GL/glext.h:2274)
GL_OFFSET_TEXTURE_RECTANGLE_NV = 34380 # GL/glext.h:2275
GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV = 34381 # GL/glext.h:2276
GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV = 34382 # GL/glext.h:2277
GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV = 34521 # GL/glext.h:2278
GL_UNSIGNED_INT_S8_S8_8_8_NV = 34522 # GL/glext.h:2279
GL_UNSIGNED_INT_8_8_S8_S8_REV_NV = 34523 # GL/glext.h:2280
GL_DSDT_MAG_INTENSITY_NV = 34524 # GL/glext.h:2281
GL_SHADER_CONSISTENT_NV = 34525 # GL/glext.h:2282
GL_TEXTURE_SHADER_NV = 34526 # GL/glext.h:2283
GL_SHADER_OPERATION_NV = 34527 # GL/glext.h:2284
GL_CULL_MODES_NV = 34528 # GL/glext.h:2285
GL_OFFSET_TEXTURE_MATRIX_NV = 34529 # GL/glext.h:2286
GL_OFFSET_TEXTURE_SCALE_NV = 34530 # GL/glext.h:2287
GL_OFFSET_TEXTURE_BIAS_NV = 34531 # GL/glext.h:2288
GL_OFFSET_TEXTURE_2D_MATRIX_NV = 34529 # GL/glext.h:2289
GL_OFFSET_TEXTURE_2D_SCALE_NV = 34530 # GL/glext.h:2290
GL_OFFSET_TEXTURE_2D_BIAS_NV = 34531 # GL/glext.h:2291
GL_PREVIOUS_TEXTURE_INPUT_NV = 34532 # GL/glext.h:2292
GL_CONST_EYE_NV = 34533 # GL/glext.h:2293
GL_PASS_THROUGH_NV = 34534 # GL/glext.h:2294
GL_CULL_FRAGMENT_NV = 34535 # GL/glext.h:2295
GL_OFFSET_TEXTURE_2D_NV = 34536 # GL/glext.h:2296
GL_DEPENDENT_AR_TEXTURE_2D_NV = 34537 # GL/glext.h:2297
GL_DEPENDENT_GB_TEXTURE_2D_NV = 34538 # GL/glext.h:2298
GL_DOT_PRODUCT_NV = 34540 # GL/glext.h:2299
GL_DOT_PRODUCT_DEPTH_REPLACE_NV = 34541 # GL/glext.h:2300
GL_DOT_PRODUCT_TEXTURE_2D_NV = 34542 # GL/glext.h:2301
GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV = 34544 # GL/glext.h:2302
GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV = 34545 # GL/glext.h:2303
GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV = 34546 # GL/glext.h:2304
GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV = 34547 # GL/glext.h:2305
GL_HILO_NV = 34548 # GL/glext.h:2306
GL_DSDT_NV = 34549 # GL/glext.h:2307
GL_DSDT_MAG_NV = 34550 # GL/glext.h:2308
GL_DSDT_MAG_VIB_NV = 34551 # GL/glext.h:2309
GL_HILO16_NV = 34552 # GL/glext.h:2310
GL_SIGNED_HILO_NV = 34553 # GL/glext.h:2311
GL_SIGNED_HILO16_NV = 34554 # GL/glext.h:2312
GL_SIGNED_RGBA_NV = 34555 # GL/glext.h:2313
GL_SIGNED_RGBA8_NV = 34556 # GL/glext.h:2314
GL_SIGNED_RGB_NV = 34558 # GL/glext.h:2315
GL_SIGNED_RGB8_NV = 34559 # GL/glext.h:2316
GL_SIGNED_LUMINANCE_NV = 34561 # GL/glext.h:2317
GL_SIGNED_LUMINANCE8_NV = 34562 # GL/glext.h:2318
GL_SIGNED_LUMINANCE_ALPHA_NV = 34563 # GL/glext.h:2319
GL_SIGNED_LUMINANCE8_ALPHA8_NV = 34564 # GL/glext.h:2320
GL_SIGNED_ALPHA_NV = 34565 # GL/glext.h:2321
GL_SIGNED_ALPHA8_NV = 34566 # GL/glext.h:2322
GL_SIGNED_INTENSITY_NV = 34567 # GL/glext.h:2323
GL_SIGNED_INTENSITY8_NV = 34568 # GL/glext.h:2324
GL_DSDT8_NV = 34569 # GL/glext.h:2325
GL_DSDT8_MAG8_NV = 34570 # GL/glext.h:2326
GL_DSDT8_MAG8_INTENSITY8_NV = 34571 # GL/glext.h:2327
GL_SIGNED_RGB_UNSIGNED_ALPHA_NV = 34572 # GL/glext.h:2328
GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV = 34573 # GL/glext.h:2329
GL_HI_SCALE_NV = 34574 # GL/glext.h:2330
GL_LO_SCALE_NV = 34575 # GL/glext.h:2331
GL_DS_SCALE_NV = 34576 # GL/glext.h:2332
GL_DT_SCALE_NV = 34577 # GL/glext.h:2333
GL_MAGNITUDE_SCALE_NV = 34578 # GL/glext.h:2334
GL_VIBRANCE_SCALE_NV = 34579 # GL/glext.h:2335
GL_HI_BIAS_NV = 34580 # GL/glext.h:2336
GL_LO_BIAS_NV = 34581 # GL/glext.h:2337
GL_DS_BIAS_NV = 34582 # GL/glext.h:2338
GL_DT_BIAS_NV = 34583 # GL/glext.h:2339
GL_MAGNITUDE_BIAS_NV = 34584 # GL/glext.h:2340
GL_VIBRANCE_BIAS_NV = 34585 # GL/glext.h:2341
GL_TEXTURE_BORDER_VALUES_NV = 34586 # GL/glext.h:2342
GL_TEXTURE_HI_SIZE_NV = 34587 # GL/glext.h:2343
GL_TEXTURE_LO_SIZE_NV = 34588 # GL/glext.h:2344
GL_TEXTURE_DS_SIZE_NV = 34589 # GL/glext.h:2345
GL_TEXTURE_DT_SIZE_NV = 34590 # GL/glext.h:2346
GL_TEXTURE_MAG_SIZE_NV = 34591 # GL/glext.h:2347
# NV_texture_shader2 (GL/glext.h:2350)
GL_DOT_PRODUCT_TEXTURE_3D_NV = 34543 # GL/glext.h:2351
# NV_vertex_array_range2 (GL/glext.h:2354)
GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV = 34099 # GL/glext.h:2355
# NV_vertex_program (GL/glext.h:2358)
GL_VERTEX_PROGRAM_NV = 34336 # GL/glext.h:2359
GL_VERTEX_STATE_PROGRAM_NV = 34337 # GL/glext.h:2360
GL_ATTRIB_ARRAY_SIZE_NV = 34339 # GL/glext.h:2361
GL_ATTRIB_ARRAY_STRIDE_NV = 34340 # GL/glext.h:2362
GL_ATTRIB_ARRAY_TYPE_NV = 34341 # GL/glext.h:2363
GL_CURRENT_ATTRIB_NV = 34342 # GL/glext.h:2364
GL_PROGRAM_LENGTH_NV = 34343 # GL/glext.h:2365
GL_PROGRAM_STRING_NV = 34344 # GL/glext.h:2366
GL_MODELVIEW_PROJECTION_NV = 34345 # GL/glext.h:2367
GL_IDENTITY_NV = 34346 # GL/glext.h:2368
GL_INVERSE_NV = 34347 # GL/glext.h:2369
GL_TRANSPOSE_NV = 34348 # GL/glext.h:2370
GL_INVERSE_TRANSPOSE_NV = 34349 # GL/glext.h:2371
GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV = 34350 # GL/glext.h:2372
GL_MAX_TRACK_MATRICES_NV = 34351 # GL/glext.h:2373
GL_MATRIX0_NV = 34352 # GL/glext.h:2374
GL_MATRIX1_NV = 34353 # GL/glext.h:2375
GL_MATRIX2_NV = 34354 # GL/glext.h:2376
GL_MATRIX3_NV = 34355 # GL/glext.h:2377
GL_MATRIX4_NV = 34356 # GL/glext.h:2378
GL_MATRIX5_NV = 34357 # GL/glext.h:2379
GL_MATRIX6_NV = 34358 # GL/glext.h:2380
GL_MATRIX7_NV = 34359 # GL/glext.h:2381
GL_CURRENT_MATRIX_STACK_DEPTH_NV = 34368 # GL/glext.h:2382
GL_CURRENT_MATRIX_NV = 34369 # GL/glext.h:2383
GL_VERTEX_PROGRAM_POINT_SIZE_NV = 34370 # GL/glext.h:2384
GL_VERTEX_PROGRAM_TWO_SIDE_NV = 34371 # GL/glext.h:2385
GL_PROGRAM_PARAMETER_NV = 34372 # GL/glext.h:2386
GL_ATTRIB_ARRAY_POINTER_NV = 34373 # GL/glext.h:2387
GL_PROGRAM_TARGET_NV = 34374 # GL/glext.h:2388
GL_PROGRAM_RESIDENT_NV = 34375 # GL/glext.h:2389
GL_TRACK_MATRIX_NV = 34376 # GL/glext.h:2390
GL_TRACK_MATRIX_TRANSFORM_NV = 34377 # GL/glext.h:2391
GL_VERTEX_PROGRAM_BINDING_NV = 34378 # GL/glext.h:2392
GL_PROGRAM_ERROR_POSITION_NV = 34379 # GL/glext.h:2393
GL_VERTEX_ATTRIB_ARRAY0_NV = 34384 # GL/glext.h:2394
GL_VERTEX_ATTRIB_ARRAY1_NV = 34385 # GL/glext.h:2395
GL_VERTEX_ATTRIB_ARRAY2_NV = 34386 # GL/glext.h:2396
GL_VERTEX_ATTRIB_ARRAY3_NV = 34387 # GL/glext.h:2397
GL_VERTEX_ATTRIB_ARRAY4_NV = 34388 # GL/glext.h:2398
GL_VERTEX_ATTRIB_ARRAY5_NV = 34389 # GL/glext.h:2399
GL_VERTEX_ATTRIB_ARRAY6_NV = 34390 # GL/glext.h:2400
GL_VERTEX_ATTRIB_ARRAY7_NV = 34391 # GL/glext.h:2401
GL_VERTEX_ATTRIB_ARRAY8_NV = 34392 # GL/glext.h:2402
GL_VERTEX_ATTRIB_ARRAY9_NV = 34393 # GL/glext.h:2403
GL_VERTEX_ATTRIB_ARRAY10_NV = 34394 # GL/glext.h:2404
GL_VERTEX_ATTRIB_ARRAY11_NV = 34395 # GL/glext.h:2405
GL_VERTEX_ATTRIB_ARRAY12_NV = 34396 # GL/glext.h:2406
GL_VERTEX_ATTRIB_ARRAY13_NV = 34397 # GL/glext.h:2407
GL_VERTEX_ATTRIB_ARRAY14_NV = 34398 # GL/glext.h:2408
GL_VERTEX_ATTRIB_ARRAY15_NV = 34399 # GL/glext.h:2409
GL_MAP1_VERTEX_ATTRIB0_4_NV = 34400 # GL/glext.h:2410
GL_MAP1_VERTEX_ATTRIB1_4_NV = 34401 # GL/glext.h:2411
GL_MAP1_VERTEX_ATTRIB2_4_NV = 34402 # GL/glext.h:2412
GL_MAP1_VERTEX_ATTRIB3_4_NV = 34403 # GL/glext.h:2413
GL_MAP1_VERTEX_ATTRIB4_4_NV = 34404 # GL/glext.h:2414
GL_MAP1_VERTEX_ATTRIB5_4_NV = 34405 # GL/glext.h:2415
GL_MAP1_VERTEX_ATTRIB6_4_NV = 34406 # GL/glext.h:2416
GL_MAP1_VERTEX_ATTRIB7_4_NV = 34407 # GL/glext.h:2417
GL_MAP1_VERTEX_ATTRIB8_4_NV = 34408 # GL/glext.h:2418
GL_MAP1_VERTEX_ATTRIB9_4_NV = 34409 # GL/glext.h:2419
GL_MAP1_VERTEX_ATTRIB10_4_NV = 34410 # GL/glext.h:2420
GL_MAP1_VERTEX_ATTRIB11_4_NV = 34411 # GL/glext.h:2421
GL_MAP1_VERTEX_ATTRIB12_4_NV = 34412 # GL/glext.h:2422
GL_MAP1_VERTEX_ATTRIB13_4_NV = 34413 # GL/glext.h:2423
GL_MAP1_VERTEX_ATTRIB14_4_NV = 34414 # GL/glext.h:2424
GL_MAP1_VERTEX_ATTRIB15_4_NV = 34415 # GL/glext.h:2425
GL_MAP2_VERTEX_ATTRIB0_4_NV = 34416 # GL/glext.h:2426
GL_MAP2_VERTEX_ATTRIB1_4_NV = 34417 # GL/glext.h:2427
GL_MAP2_VERTEX_ATTRIB2_4_NV = 34418 # GL/glext.h:2428
GL_MAP2_VERTEX_ATTRIB3_4_NV = 34419 # GL/glext.h:2429
GL_MAP2_VERTEX_ATTRIB4_4_NV = 34420 # | |
"""
Generate `pyi` from corresponding `rst` docs.
"""
import rst
from class_ import Class
from rst2pyi import RST2PyI
__author__ = rst.__author__
__copyright__ = rst.__copyright__
__license__ = rst.__license__
__version__ = "7.2.0" # Version set by https://github.com/hlovatt/tag2ver
def pyb(shed: RST2PyI) -> None:
    """Drive conversion of every `pyb` class's rst docs into the stub file."""
    _pyb(shed)
    # Each converter consumes its own rst file and returns the name of the
    # next rst file to process; thread that name through the whole chain.
    next_rst = _accel(shed)
    for convert in (
        _adc,
        _can,
        _dac,
        _ext_int,
        _flash,
        _i2c,
        _lcd,
        _led,
        _pin,
        _rtc,
        _servo,
        _spi,
        _switch,
        _timer,
        _uart,
        _usb_hid,
    ):
        next_rst = convert(next_rst, shed)
    # `_usb_vcp` is last in the chain and returns nothing.
    _usb_vcp(next_rst, shed)
    shed.write()
def _usb_vcp(this: str, shed: RST2PyI) -> None:
    """Convert the `pyb.USB_VCP` rst docs into typed stub entries (last class in the chain)."""
    shed.class_from_file(
        pre_str="# noinspection PyPep8Naming", old=this,
    )
    shed.def_(
        old=r".. class:: pyb.USB_VCP(id=0)", new="def __init__(self, id: int = 0, /)",
    )
    shed.def_(
        old=r".. method:: USB_VCP.init(*, flow=-1)",
        # Emit `-1` without the stray space (`= - 1`) the original generated.
        new="def init(self, *, flow: int = -1) -> int",
    )
    shed.def_(
        old=r".. method:: USB_VCP.setinterrupt(chr)",
        new="def setinterrupt(self, chr: int, /) -> None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.isconnected()", new="def isconnected(self) -> bool",
    )
    shed.def_(
        old=r".. method:: USB_VCP.any()", new="def any(self) -> bool",
    )
    shed.def_(
        old=r".. method:: USB_VCP.close()", new="def close(self) -> None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.read([nbytes])",
        new=[
            "def read(self) -> bytes | None",
            # `nbytes` is now annotated, consistent with `readinto` below and
            # with the `UART.read` overloads emitted elsewhere in this module.
            "def read(self, nbytes: int, /) -> bytes | None",
        ],
    )
    shed.def_(
        old=r".. method:: USB_VCP.readinto(buf, [maxlen])",
        new=[
            "def readinto(self, buf: AnyWritableBuf, /) -> int | None",
            "def readinto(self, buf: AnyWritableBuf, maxlen: int, /) -> int | None",
        ],
    )
    shed.def_(
        old=r".. method:: USB_VCP.readline()", new="def readline(self) -> bytes | None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.write(buf)",
        new="def write(self, buf: AnyReadableBuf, /) -> int",
    )
    shed.def_(
        old=r".. method:: USB_VCP.recv(data, *, timeout=5000)",
        new=[
            "def recv(self, data: int, /, *, timeout: int = 5000) -> bytes | None",
            "def recv(self, data: AnyWritableBuf, /, *, timeout: int = 5000) -> int | None",
        ],
    )
    shed.def_(
        old=r".. method:: USB_VCP.send(data, *, timeout=5000)",
        # NOTE(review): `send` consumes its buffer, so `AnyWritableBuf` in the
        # original looked wrong; emitted as-is except kept for `recv` symmetry —
        # confirm against upstream typeshed before further changes.
        new="def send(self, buf: AnyWritableBuf | bytes | int, /, *, timeout: int = 5000) -> int",
    )
    shed.vars(
        old=[".. data:: USB_VCP.RTS", "USB_VCP.CTS"], end=None,
    )
def _usb_hid(this: str, shed: RST2PyI) -> str:
    """Convert the `pyb.USB_HID` rst docs; returns the next rst file to process."""
    shed.class_from_file(
        pre_str="# noinspection PyPep8Naming", old=this,
    )
    shed.def_(
        old=r".. class:: pyb.USB_HID()", new="def __init__(self)",
    )
    shed.def_(
        old=r".. method:: USB_HID.recv(data, *, timeout=5000)",
        new=[
            "def recv(self, data: int, /, *, timeout: int = 5000) -> bytes",
            "def recv(self, data: AnyWritableBuf, /, *, timeout: int = 5000) -> int",
        ],
    )
    # The final method consumes up to the start of the USB_VCP rst file.
    next_rst = "pyb.USB_VCP.rst"
    shed.def_(
        old=r".. method:: USB_HID.send(data)",
        new="def send(self, data: Sequence[int]) -> None",
        end=next_rst,
    )
    return next_rst
def _uart(this: str, shed: RST2PyI) -> str:
    """Convert the `pyb.UART` rst docs into typed stub entries; returns the next rst file name."""
    shed.class_from_file(
        pre_str="# noinspection PyShadowingNames", old=this,
    )
    shed.def_(
        old=r".. class:: pyb.UART(bus, ...)",
        new=[
            """
            def __init__(
                self,
                bus: int | str,
                /
            )
            """,
            """
            def __init__(
                self,
                bus: int | str,
                baudrate: int,
                /,
                bits: int = 8,
                parity: int | None = None,
                stop: int = 1,
                *,
                timeout: int = 0,
                flow: int = 0,
                timeout_char: int = 0,
                read_buf_len: int = 64
            )
            """,
        ],
    )
    shed.def_(
        old=(
            r".. method:: UART.init(baudrate, bits=8, parity=None, stop=1, *, "
            r"timeout=0, flow=0, timeout_char=0, read_buf_len=64)"
        ),
        new="""
        def init(
            self,
            baudrate: int,
            /,
            bits: int = 8,
            parity: int | None = None,
            stop: int = 1,
            *,
            timeout: int = 0,
            flow: int = 0,
            timeout_char: int = 0,
            read_buf_len: int = 64
        )
        """,
    )
    shed.def_(
        old=r".. method:: UART.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=r".. method:: UART.any()", new="def any(self) -> int",
    )
    shed.def_(
        old=r".. method:: UART.read([nbytes])",
        new=[
            "def read(self) -> bytes | None",
            "def read(self, nbytes: int, /) -> bytes | None",
        ],
    )
    shed.def_(
        old=r".. method:: UART.readchar()", new="def readchar(self) -> int",
    )
    shed.def_(
        old=r".. method:: UART.readinto(buf[, nbytes])",
        new=[
            "def readinto(self, buf: AnyWritableBuf, /) -> int | None",
            "def readinto(self, buf: AnyWritableBuf, nbytes: int, /) -> int | None",
        ],
    )
    shed.def_(
        # NOTE(review): MicroPython stream `readline` returns bytes; the emitted
        # `-> str | None` looks suspect — confirm against upstream stubs.
        old=r".. method:: UART.readline()", new="def readline(self) -> str | None",
    )
    shed.def_(
        # NOTE(review): `write` consumes a buffer, so `AnyWritableBuf` is likely
        # meant to be `AnyReadableBuf` — confirm before changing the emitted stub.
        old=r".. method:: UART.write(buf)",
        new="def write(self, buf: AnyWritableBuf, /) -> int | None",
    )
    shed.def_(
        old=r".. method:: UART.writechar(char)",
        new="def writechar(self, char: int, /) -> None",
    )
    shed.def_(
        old=r".. method:: UART.sendbreak()", new="def sendbreak(self) -> None",
    )
    shed.vars(
        old=[".. data:: UART.RTS", "UART.CTS"], end="Flow Control",
    )
    # Remaining notes in the rst file are appended to the module docstring.
    nxt = "pyb.USB_HID.rst"
    shed.pyi.doc.extend(shed.extra_notes(end=nxt))
    return nxt
def _timer_channel(*, old: str, end: str, shed: RST2PyI) -> None:
    """Synthesise the abstract `TimerChannel` class from the timer-channel rst section.

    Consumes the section header, collects the free-text description up to the
    "Methods" underline as the class docstring, then converts each method.
    """
    shed.consume_containing_line(old)
    shed.consume_equals_underline_line()
    shed.consume_blank_line()
    methods = "Methods"
    doc = []
    # Everything before the "Methods" heading becomes the class docstring.
    for doc_line in shed.rst:
        if doc_line.startswith(methods):
            shed.consume_minuses_underline_line()
            shed.consume_blank_line()
            break
        doc.append(f"   {doc_line}\n")
    else:
        # Raise explicitly: `assert False` is stripped under `python -O`.
        raise AssertionError(f"Did not find: `{methods}`")
    new_class = Class()
    # Plain literal — the original used an f-string with no placeholders.
    new_class.class_def = "class TimerChannel(ABC):"
    new_class.doc = doc
    shed.pyi.classes.append(new_class)
    shed.def_(
        old=".. method:: timerchannel.callback(fun)",
        new="""
@abstractmethod
def callback(self, fun: Callable[[Timer], None] | None, /) -> None
""",
    )
    shed.def_(
        old=".. method:: timerchannel.capture([value])",
        new=[
            """
@abstractmethod
def capture(self) -> int
""",
            """
@abstractmethod
def capture(self, value: int, /) -> None
""",
        ],
    )
    shed.def_(
        old=".. method:: timerchannel.compare([value])",
        new=[
            """
@abstractmethod
def compare(self) -> int
""",
            """
@abstractmethod
def compare(self, value: int, /) -> None
""",
        ],
    )
    shed.def_(
        old=".. method:: timerchannel.pulse_width([value])",
        new=[
            """
@abstractmethod
def pulse_width(self) -> int
""",
            """
@abstractmethod
def pulse_width(self, value: int, /) -> None
""",
        ],
    )
    shed.def_(
        old=".. method:: timerchannel.pulse_width_percent([value])",
        new=[
            """
@abstractmethod
def pulse_width_percent(self) -> float
""",
            """
@abstractmethod
def pulse_width_percent(self, value: int | float, /) -> None
""",
        ],
        end=end,
    )
def _timer(this: str, shed: RST2PyI) -> str:
shed.class_from_file(
pre_str="# noinspection PyShadowingNames,PyUnresolvedReferences",
old=this,
post_doc='''
UP: ClassVar[int] = ...
"""
configures the timer to count from 0 to ARR (default).
"""
DOWN: ClassVar[int] = ...
"""
configures the timer to count from ARR down to 0.
"""
CENTER: ClassVar[int] = ...
"""
configures the timer to count from 0 to ARR and then back down to 0.
"""
PWM: ClassVar[int] = ...
"""
configure the timer in PWM mode (active high).
"""
PWM_INVERTED: ClassVar[int] = ...
"""
configure the timer in PWM mode (active low).
"""
OC_TIMING: ClassVar[int] = ...
"""
indicates that no pin is driven.
"""
OC_ACTIVE: ClassVar[int] = ...
"""
the pin will be made active when a compare match occurs (active is determined by polarity).
"""
OC_INACTIVE: ClassVar[int] = ...
"""
the pin will be made inactive when a compare match occurs.
"""
OC_TOGGLE: ClassVar[int] = ...
"""
the pin will be toggled when an compare match occurs.
"""
OC_FORCED_ACTIVE: ClassVar[int] = ...
"""
the pin is forced active (compare match is ignored).
"""
OC_FORCED_INACTIVE: ClassVar[int] = ...
"""
the pin is forced inactive (compare match is ignored).
"""
IC: ClassVar[int] = ...
"""
configure the timer in Input Capture mode.
"""
ENC_A: ClassVar[int] = ...
"""
configure the timer in Encoder mode. The counter only changes when CH1 changes.
"""
ENC_B: ClassVar[int] = ...
"""
configure the timer in Encoder mode. The counter only changes when CH2 changes.
"""
ENC_AB: ClassVar[int] = ...
"""
configure the timer in Encoder mode. The counter changes when CH1 or CH2 changes.
"""
HIGH: ClassVar[int] = ...
"""
output is active high.
"""
LOW: ClassVar[int] = ...
"""
output is active low.
"""
RISING: ClassVar[int] = ...
"""
captures on rising edge.
"""
FALLING: ClassVar[int] = ...
"""
captures on falling edge.
"""
BOTH: ClassVar[int] = ...
"""
captures on both edges.
"""
''',
)
shed.def_(
old=r".. class:: pyb.Timer(id, ...)",
new=[
"""
def __init__(
self,
id: int,
/
)
""",
"""
def __init__(
self,
id: int,
/,
*,
freq: int,
mode: int = UP,
div: int = 1,
callback: Callable[[Timer], None] | None = None,
deadtime: int = 0
)
""",
"""
def __init__(
self,
id: int,
/,
*,
prescaler: int,
period: int,
mode: int = UP,
div: int = 1,
callback: Callable[[Timer], None] | None = None,
deadtime: int = 0
)
""",
],
)
shed.def_(
old=r".. method:: Timer.init(*, freq, prescaler, period, mode=Timer.UP, div=1, callback=None, deadtime=0)",
new=[
"""
def init(
self,
*,
freq: int,
mode: int = UP,
div: int = 1,
callback: Callable[[Timer], None] | None = None,
deadtime: int = 0
) -> None
""",
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from skimage.util import view_as_windows
import warnings
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_grad, math_grad
from collections import OrderedDict
# Graph op types whose gradients can be overridden by attribution methods.
SUPPORTED_ACTIVATIONS = [
    'Relu', 'Elu', 'Sigmoid', 'Tanh', 'Softplus'
]
# Op types explicitly known to lack an override.
UNSUPPORTED_ACTIVATIONS = [
    'CRelu', 'Relu6', 'Softsign'
]
# Attribution class whose gradient override is currently active (None = default grads).
_ENABLED_METHOD_CLASS = None
# Set to 1 whenever the registered override gradient actually fires.
_GRAD_OVERRIDE_CHECKFLAG = 0
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS
# -----------------------------------------------------------------------------
def activation(type):
    """
    Returns Tensorflow's activation op, given its type
    :param type: string
    :return: op
    """
    if type not in SUPPORTED_ACTIVATIONS:
        warnings.warn('Activation function (%s) not supported' % type)
    # Graph op types are capitalized ('Relu'); tf.nn uses lowercase names.
    return getattr(tf.nn, type.lower())
def original_grad(op, grad):
    """
    Return original Tensorflow gradient for an op
    :param op: op
    :param grad: Tensor
    :return: Tensor
    """
    if op.type not in SUPPORTED_ACTIVATIONS:
        warnings.warn('Activation function (%s) not supported' % op.type)
    opname = '_%sGrad' % op.type
    # Gradient implementations live in nn_grad for most activations,
    # math_grad for the rest (e.g. Sigmoid/Tanh).
    grad_module = nn_grad if hasattr(nn_grad, opname) else math_grad
    return getattr(grad_module, opname)(op, grad)
# -----------------------------------------------------------------------------
# ATTRIBUTION METHODS BASE CLASSES
# -----------------------------------------------------------------------------
class AttributionMethod(object):
    """
    Attribution method base class.

    Holds the target tensor ``T``, the input placeholder(s) ``X``, the concrete
    input data ``xs`` and the TF session used to evaluate tensors.
    """
    def __init__(self, T, X, xs, session, keras_learning_phase=None):
        self.T = T
        self.X = X
        self.xs = xs
        self.session = session
        self.keras_learning_phase = keras_learning_phase
        # Multiple-input models pass X as a list/tuple of placeholders.
        self.has_multiple_inputs = isinstance(self.X, (list, tuple))
        print('Model with multiple inputs: ', self.has_multiple_inputs)

    def session_run(self, T, xs):
        """Evaluate tensor(s) ``T``, feeding ``xs`` into the model input(s)."""
        feed_dict = {}
        if self.has_multiple_inputs:
            if len(xs) != len(self.X):
                raise RuntimeError('List of input tensors and input data have different lengths (%s and %s)'
                                   % (len(xs), len(self.X)))
            feed_dict.update(zip(self.X, xs))
        else:
            feed_dict[self.X] = xs
        # Force inference mode for Keras models (dropout / batch-norm off).
        if self.keras_learning_phase is not None:
            feed_dict[self.keras_learning_phase] = 0
        return self.session.run(T, feed_dict)

    def _set_check_baseline(self):
        """Default the baseline to zeros, or validate/expand a user-supplied one.

        A user baseline must match the per-sample input shape; it is expanded
        with a leading batch axis of 1 so it broadcasts against ``xs``.
        """
        if self.baseline is None:
            if self.has_multiple_inputs:
                self.baseline = [np.zeros((1,) + xi.shape[1:]) for xi in self.xs]
            else:
                self.baseline = np.zeros((1,) + self.xs.shape[1:])
        else:
            if self.has_multiple_inputs:
                for i, xi in enumerate(self.xs):
                    if self.baseline[i].shape == self.xs[i].shape[1:]:
                        self.baseline[i] = np.expand_dims(self.baseline[i], 0)
                    else:
                        raise RuntimeError('Baseline shape %s does not match expected shape %s'
                                           % (self.baseline[i].shape, self.xs[i].shape[1:]))
            else:
                if self.baseline.shape == self.xs.shape[1:]:
                    self.baseline = np.expand_dims(self.baseline, 0)
                else:
                    raise RuntimeError('Baseline shape %s does not match expected shape %s'
                                       % (self.baseline.shape, self.xs.shape[1:]))
class GradientBasedMethod(AttributionMethod):
    """
    Base class for attribution methods computed from gradients of T w.r.t. X.
    """
    def get_symbolic_attribution(self):
        """Symbolic attribution: plain gradients of the target w.r.t. inputs."""
        return tf.gradients(self.T, self.X)

    def run(self):
        """Evaluate the symbolic attributions on the stored input data."""
        symbolic = self.get_symbolic_attribution()
        evaluated = self.session_run(symbolic, self.xs)
        return evaluated if self.has_multiple_inputs else evaluated[0]

    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        """Default override: fall back to TensorFlow's original gradient."""
        return original_grad(op, grad)
class PerturbationBasedMethod(AttributionMethod):
    """
    Base class for perturbation-based attribution methods
    (attributions come from re-running the model on modified inputs,
    not from differentiating through the graph).
    """
    def __init__(self, T, X, xs, session, keras_learning_phase):
        super(PerturbationBasedMethod, self).__init__(T, X, xs, session, keras_learning_phase)
        # Cached activation of the unperturbed input; set by subclasses.
        self.base_activation = None
    def _run_input(self, x):
        # Forward pass of the target tensor on arbitrary input `x`.
        return self.session_run(self.T, x)
    def _run_original(self):
        # Forward pass on the stored, unperturbed input.
        return self._run_input(self.xs)
    def run(self):
        raise RuntimeError('Abstract: cannot run PerturbationBasedMethod')
# -----------------------------------------------------------------------------
# ATTRIBUTION METHODS
# -----------------------------------------------------------------------------
"""
Returns zero attributions. For testing only.
"""
class DummyZero(GradientBasedMethod):
    """Test-only method: gradient override zeroes everything out."""
    def get_symbolic_attribution(self):
        return tf.gradients(self.T, self.X)

    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        # Kill the gradient flowing through every nonlinearity.
        return tf.zeros_like(op.inputs[0])
"""
Saliency maps
https://arxiv.org/abs/1312.6034
"""
class Saliency(GradientBasedMethod):
    """Saliency maps: absolute value of the input gradients."""
    def get_symbolic_attribution(self):
        gradients = tf.gradients(self.T, self.X)
        return [tf.abs(gradient) for gradient in gradients]
"""
Gradient * Input
https://arxiv.org/pdf/1704.02685.pdf - https://arxiv.org/abs/1611.07270
"""
class GradientXInput(GradientBasedMethod):
    """
    Gradient * Input attribution.

    Bug fix: the original body returned the bare gradient ``g`` and silently
    ignored the zipped input ``x``; the method is defined as the elementwise
    product of gradient and input.
    """
    def get_symbolic_attribution(self):
        return [g * x for g, x in zip(
            tf.gradients(self.T, self.X),
            self.X if self.has_multiple_inputs else [self.X])]
"""
Integrated Gradients
https://arxiv.org/pdf/1703.01365.pdf
"""
class IntegratedGradients(GradientBasedMethod):
    """
    Integrated Gradients: average the gradients along a straight-line path
    from a baseline to the actual input, then scale by (input - baseline).
    """
    def __init__(self, T, X, xs, session, keras_learning_phase, steps=100, baseline=None):
        super(IntegratedGradients, self).__init__(T, X, xs, session, keras_learning_phase)
        self.steps = steps  # number of interpolation points on the path
        self.baseline = baseline  # None -> zero baseline (see _set_check_baseline)
    def run(self):
        # Check user baseline or set default one
        self._set_check_baseline()
        attributions = self.get_symbolic_attribution()
        gradient = None
        # Accumulate gradients at evenly spaced alphas in (0, 1].
        for alpha in list(np.linspace(1. / self.steps, 1.0, self.steps)):
            xs_mod = [b + (xs - b) * alpha for xs, b in zip(self.xs, self.baseline)] if self.has_multiple_inputs \
                else self.baseline + (self.xs - self.baseline) * alpha
            _attr = self.session_run(attributions, xs_mod)
            if gradient is None: gradient = _attr
            else: gradient = [g + a for g, a in zip(gradient, _attr)]
        # Mean path gradient times the displacement from the baseline.
        results = [g * (x - b) / self.steps for g, x, b in zip(
            gradient,
            self.xs if self.has_multiple_inputs else [self.xs],
            self.baseline if self.has_multiple_inputs else [self.baseline])]
        return results[0] if not self.has_multiple_inputs else results
"""
Layer-wise Relevance Propagation with epsilon rule
http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0130140
"""
class EpsilonLRP(GradientBasedMethod):
    """
    Layer-wise Relevance Propagation with the epsilon rule.

    Fix: epsilon is stored on the class instead of leaking through a
    module-level ``global eps`` (the original shadowed the otherwise-unused
    class attribute and survived across unrelated instances).
    """
    # Epsilon stabilizer used by the gradient override; set by __init__.
    eps = None

    def __init__(self, T, X, xs, session, keras_learning_phase, epsilon=1e-4):
        super(EpsilonLRP, self).__init__(T, X, xs, session, keras_learning_phase)
        assert epsilon > 0.0, 'LRP epsilon must be greater than zero'
        EpsilonLRP.eps = epsilon

    def get_symbolic_attribution(self):
        return [g * x for g, x in zip(
            tf.gradients(self.T, self.X),
            self.X if self.has_multiple_inputs else [self.X])]

    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        output = op.outputs[0]
        input = op.inputs[0]
        # eps keeps the denominator away from zero, signed to match the input.
        return grad * output / (input + cls.eps *
                                tf.where(input >= 0, tf.ones_like(input), -1 * tf.ones_like(input)))
"""
DeepLIFT
This reformulation only considers the "Rescale" rule
https://arxiv.org/abs/1704.02685
"""
class DeepLIFTRescale(GradientBasedMethod):
    """
    DeepLIFT, Rescale rule only: gradients through supported nonlinearities
    are replaced by delta-output / delta-input measured against a reference
    (baseline) forward pass.
    """
    # Maps op name -> reference input value; class-level, so shared across
    # instances and rebuilt on every run by _init_references().
    _deeplift_ref = {}
    def __init__(self, T, X, xs, session, keras_learning_phase, baseline=None):
        super(DeepLIFTRescale, self).__init__(T, X, xs, session, keras_learning_phase)
        self.baseline = baseline
    def get_symbolic_attribution(self):
        return [g for g, x, b in zip(
            tf.gradients(self.T, self.X),
            self.X if self.has_multiple_inputs else [self.X],
            self.baseline if self.has_multiple_inputs else [self.baseline])]
    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        output = op.outputs[0]
        input = op.inputs[0]
        ref_input = cls._deeplift_ref[op.name]
        ref_output = activation(op.type)(ref_input)
        delta_out = output - ref_output
        delta_in = input - ref_input
        instant_grad = activation(op.type)(0.5 * (ref_input + input))
        # Rescale rule where delta_in is non-negligible; otherwise fall back to
        # the ordinary gradient at the midpoint to avoid dividing by ~0.
        return tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in,
                        original_grad(instant_grad.op, grad))
    def run(self):
        # Check user baseline or set default one
        self._set_check_baseline()
        # Init references with a forward pass
        self._init_references()
        # Run the default run
        return super(DeepLIFTRescale, self).run()
    def _init_references(self):
        # Forward-pass the baseline and record each supported activation op's
        # input value as its reference.
        # print ('DeepLIFT: computing references...')
        sys.stdout.flush()
        self._deeplift_ref.clear()
        ops = []
        g = tf.get_default_graph()
        for op in g.get_operations():
            # Skip gradient ops; keep only supported activations with inputs.
            if len(op.inputs) > 0 and not op.name.startswith('gradients'):
                if op.type in SUPPORTED_ACTIVATIONS:
                    ops.append(op)
        # print('DeepLIFT: here are the ops')
        # print(ops)
        YR = self.session_run([o.inputs[0] for o in ops], self.baseline)
        for (r, op) in zip(YR, ops):
            self._deeplift_ref[op.name] = r
        # print('DeepLIFT: references ready')
        sys.stdout.flush()
"""
Occlusion method
Generalization of the grey-box method presented in https://arxiv.org/pdf/1311.2901.pdf
This method performs a systematic perturbation of contiguous hyperpatches in the input,
replacing each patch with a user-defined value (by default 0).
window_shape : integer or tuple of length xs_ndim
Defines the shape of the elementary n-dimensional orthotope the rolling window view.
If an integer is given, the shape will be a hypercube of sidelength given by its value.
step : integer or tuple of length xs_ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
"""
class Occlusion(PerturbationBasedMethod):
    """
    Occlusion attribution: systematically zero out (or replace) contiguous
    patches of the input and attribute the change in the model output back to
    the occluded pixels.
    """
    def __init__(self, T, X, xs, session, keras_learning_phase, window_shape=None, step=None):
        super(Occlusion, self).__init__(T, X, xs, session, keras_learning_phase)
        if self.has_multiple_inputs:
            raise RuntimeError('Multiple inputs not yet supported for perturbation methods')
        # Per-sample shape (first sample) defines patch/window dimensionality.
        input_shape = xs[0].shape
        if window_shape is not None:
            assert len(window_shape) == len(input_shape), \
                'window_shape must have length of input (%d)' % len(input_shape)
            self.window_shape = tuple(window_shape)
        else:
            self.window_shape = (1,) * len(input_shape)
        if step is not None:
            assert isinstance(step, int) or len(step) == len(input_shape), \
                'step must be integer or tuple with the length of input (%d)' % len(input_shape)
            self.step = step
        else:
            self.step = 1
        self.replace_value = 0.0
        print('Input shape: %s; window_shape %s; step %s' % (input_shape, self.window_shape, self.step))

    def run(self):
        input_shape = self.xs.shape[1:]
        batch_size = self.xs.shape[0]
        # np.asscalar was removed in NumPy 1.23; int(...) is the supported form.
        total_dim = int(np.prod(input_shape))
        # Create mask: flat index grid carved into sliding-window patches.
        index_matrix = np.arange(total_dim).reshape(input_shape)
        idx_patches = view_as_windows(index_matrix, self.window_shape, self.step).reshape((-1,) + self.window_shape)
        # reshape takes one shape tuple; the original `reshape((-1), total_dim)`
        # relied on the two-positional-arguments form by accident.
        heatmap = np.zeros_like(self.xs, dtype=np.float32).reshape((-1, total_dim))
        w = np.zeros_like(heatmap)
        # Compute original output once (a second redundant warm-up call removed).
        eval0 = self._run_original()
        # Start perturbation loop
        for i, p in enumerate(idx_patches):
            mask = np.ones(input_shape).flatten()
            mask[p.flatten()] = self.replace_value
            masked_xs = mask.reshape((1,) + input_shape) * self.xs
            delta = eval0 - self._run_input(masked_xs)
            delta_aggregated = np.sum(delta.reshape((batch_size, -1)), -1, keepdims=True)
            # Every pixel in the patch shares the aggregated output change.
            heatmap[:, p.flatten()] += delta_aggregated
            w[:, p.flatten()] += p.size
        # Normalize by how many patches covered each pixel.
        attribution = np.reshape(heatmap / w, self.xs.shape)
        if np.isnan(attribution).any():
            warnings.warn('Attributions generated by Occlusion method contain nans, '
                          'probably because window_shape and step do not allow to cover the all input.')
        return attribution
# -----------------------------------------------------------------------------
# END ATTRIBUTION METHODS
# -----------------------------------------------------------------------------
# Registry mapping a method's string key to its (implementation class, id) pair.
attribution_methods = OrderedDict({
    'zero': (DummyZero, 0),
    'saliency': (Saliency, 1),
    'grad*input': (GradientXInput, 2),
    'intgrad': (IntegratedGradients, 3),
    'elrp': (EpsilonLRP, 4),
    'deeplift': (DeepLIFTRescale, 5),
    'occlusion': (Occlusion, 6)
})
@ops.RegisterGradient("DeepExplainGrad")
def deepexplain_grad(op, grad):
    """Registered gradient for overridden nonlinearities.

    Delegates to the enabled method's override when one is active; otherwise
    falls back to TensorFlow's original gradient.
    """
    global _ENABLED_METHOD_CLASS, _GRAD_OVERRIDE_CHECKFLAG
    # Side channel: lets callers verify the override actually fired.
    _GRAD_OVERRIDE_CHECKFLAG = 1
    if _ENABLED_METHOD_CLASS is not None \
            and issubclass(_ENABLED_METHOD_CLASS, GradientBasedMethod):
        return _ENABLED_METHOD_CLASS.nonlinearity_grad_override(op, grad)
    else:
        return original_grad(op, grad)
class DeepExplain(object):
def __init__(self, graph=None, session=tf.get_default_session()):
self.method = None
self.batch_size = None
self.session = session
self.graph = session.graph if graph is None else graph
self.graph_context = self.graph.as_default()
self.override_context = self.graph.gradient_override_map(self.get_override_map())
self.keras_phase_placeholder = None
self.context_on = False
if self.session is None:
raise RuntimeError('DeepExplain: could not retrieve a session. Use DeepExplain(session=your_session).')
def compile_func(self, inputs, outputs):
if (isinstance(inputs, list)==False):
print("Wrapping the inputs in a list...")
inputs = [inputs]
assert isinstance(inputs, list)
# remove possible None | |
# Source repository: ulsdevteam/request_broker
import re
import inflect
import shortuuid
from request_broker import settings
from asnake.utils import get_date_display, get_note_text, text_in_note, resolve_to_uri
from ordered_set import OrderedSet
CONFIDENCE_RATIO = 97 # Minimum confidence ratio to match against.
OPEN_TEXT = ["Open for research", "Open for scholarly research"]
CLOSED_TEXT = ["Restricted"]
def get_container_indicators(item_json):
    """Returns container indicator(s) for an archival object.

    Args:
        item_json (dict): ArchivesSpace archival object information that has
        resolved top containers and digital objects.

    Returns:
        string or None: A concatenated string containing the container type and
        container indicator, or digital object title.
    """
    instances = item_json.get("instances")
    if not instances:
        return None
    indicators = []
    for instance in instances:
        if instance.get("instance_type") == "digital_object":
            resolved = instance.get("digital_object").get("_resolved")
            indicators.append("Digital Object: {}".format(resolved.get("title")))
        else:
            top_container = instance.get("sub_container").get("top_container").get("_resolved")
            indicators.append("{} {}".format(top_container.get("type").capitalize(), top_container.get("indicator")))
    return ", ".join(indicators)
def get_file_versions(digital_object):
    """Returns the file versions for an ArchivesSpace digital object.

    Args:
        digital_object (dict): Resolved json of an ArchivesSpace digital object.

    Returns:
        string: all file version uris associated with the digital object,
        separated by a comma.
    """
    file_uris = (version.get("file_uri") for version in digital_object.get("file_versions"))
    return ", ".join(file_uris)
def get_locations(top_container_info):
    """Gets a string representation of a location for an ArchivesSpace top container.

    Args:
        top_container_info (dict): json for a top container (with resolved container locations)

    Returns:
        string: all locations associated with the top container, separated by a comma.
    """
    def shorten(loc_data):
        # Drop the "Vault " prefix from the room and join as room.coord1.coord2.
        room = loc_data.get("room", "").strip().replace("Vault ", "")
        coordinate_1 = loc_data.get("coordinate_1_indicator", "").strip()
        coordinate_2 = loc_data.get("coordinate_2_indicator", "").strip()
        return ".".join([room, coordinate_1, coordinate_2])

    container_locations = top_container_info.get("container_locations")
    if not container_locations:
        return None
    return ",".join(shorten(location["_resolved"]) for location in container_locations)
def prepare_values(values_list):
    """Process an iterable of lists.

    For each list in the initial iterable, removes None (falsy) values,
    deduplicates while preserving first-seen order, and replaces the list with
    either a string of joined items or None.

    Uses ``dict.fromkeys`` for the ordered de-duplication, which has identical
    semantics to the previous third-party ``OrderedSet`` dependency.

    Args:
        values_list (iterable): an iterable in which each item is a list.

    Returns:
        values_list (tuple): processed values.
    """
    for n, item in enumerate(values_list):
        deduplicated = list(dict.fromkeys(value for value in item if value))
        values_list[n] = ", ".join(deduplicated) if deduplicated else None
    return tuple(values_list)
def get_instance_data(instance_list):
    """Creates a standardized tuple for each item in an instance list depending on
    the item's instance type.

    Args:
        instance_list (list): A list of ArchivesSpace instance information with
        resolved top containers and digital objects.

    Returns:
        tuple: a tuple containing instance type, indicator, location,
        container barcode or digital object id, and container/digital object
        ref for the instance.
    """
    instance_types, containers, subcontainers = [], [], []
    locations, barcodes, refs = [], [], []
    for instance in instance_list:
        if instance["instance_type"] == "digital_object":
            resolved = instance.get("digital_object").get("_resolved")
            instance_types.append("digital_object")
            containers.append("Digital Object: {}".format(resolved.get("title")))
            locations.append(get_file_versions(resolved))
            barcodes.append(resolved.get("digital_object_id"))
            refs.append(resolved.get("uri"))
        else:
            sub_container = instance.get("sub_container")
            top_container = sub_container.get("top_container").get("_resolved")
            instance_types.append(instance["instance_type"])
            containers.append("{} {}".format(top_container.get("type").capitalize(), top_container.get("indicator")))
            locations.append(get_locations(top_container))
            barcodes.append(top_container.get("barcode"))
            refs.append(top_container["uri"])
            # Child containers (e.g. folders) only exist when both fields are set.
            if "type_2" in sub_container and "indicator_2" in sub_container:
                subcontainers.append("{} {}".format(sub_container.get("type_2").capitalize(), sub_container.get("indicator_2")))
    return prepare_values([instance_types, containers, subcontainers, locations, barcodes, refs])
def get_preferred_format(item_json):
    """Gets the instance data for the preferred delivery format of the current
    archival object.

    Prioritizes digital objects, then microform, and then returns anything if
    there is an instance.

    Args:
        item_json (dict): ArchivesSpace archival object information that has
        resolved top containers and digital objects.

    Returns:
        preferred (tuple): a tuple containing concatenated information of the
        preferred format retrieved by get_instance_data.
    """
    instances = item_json.get("instances")
    if not instances:
        return (None, None, None, None, None, None)
    if any("digital_object" in obj for obj in instances):
        chosen = [i for i in instances if i["instance_type"] == "digital_object"]
    elif any(obj.get("instance_type") == "microform" for obj in instances):
        chosen = [i for i in instances if i["instance_type"] == "microform"]
    else:
        chosen = [i for i in instances]
    return get_instance_data(chosen)
def get_rights_info(item_json, client):
    """Gets rights status and text for an archival object.

    If no parseable rights status is available, it is assumed the item is open.

    Args:
        item_json (dict): json for an archival object (with resolved ancestors)
        client: an ASnake client

    Returns:
        status, text: A tuple containing the rights status and text. Status is
        one of "closed", "conditional" or "open". Text is either None or a string
        describing the restriction.
    """
    def with_ancestor_fallback(getter):
        # Use the object's own value; otherwise walk ancestors until one hits.
        value = getter(item_json, client)
        if not value:
            for ancestor in item_json["ancestors"]:
                value = getter(ancestor["_resolved"], client)
                if value:
                    break
        return value

    status = with_ancestor_fallback(get_rights_status)
    text = with_ancestor_fallback(get_rights_text)
    return status or "open", text
def get_rights_status(item_json, client):
    """Determines restrictions status for an archival object.
    Evaluates an object's rights statements and accessrestrict notes (in that order)
    to determine if restrictions have been explicitly set on the archival object.
    Returns None if restrictions cannot be parsed from those three sources.
    Args:
        item_json (dict): json for an archival object
        client: an ASnake client
    Returns:
        status: One of "closed", "conditional", "open", None
    """
    status = None
    if item_json.get("rights_statements"):
        # Machine-readable rights statements take precedence over note text.
        for stmnt in item_json["rights_statements"]:
            if any([act["restriction"].lower() == "disallow" for act in stmnt.get("acts", [])]):
                status = "closed"
            elif any([act["restriction"].lower() == "conditional" for act in stmnt.get("acts", [])]):
                status = "conditional"
    elif [n for n in item_json.get("notes", []) if n.get("type") == "accessrestrict"]:
        notes = [n for n in item_json["notes"] if n.get("type") == "accessrestrict"]
        # Fuzzy-match the note text against known phrases; when both closed and
        # open phrases match, the nested check lets "open" win.
        if any([text_in_note(n, text, client, confidence=CONFIDENCE_RATIO) for text in CLOSED_TEXT for n in notes]):
            status = "closed"
            if any([text_in_note(n, text, client, confidence=CONFIDENCE_RATIO) for text in OPEN_TEXT for n in notes]):
                status = "open"
        elif any([text_in_note(n, text, client, confidence=CONFIDENCE_RATIO) for text in OPEN_TEXT for n in notes]):
            status = "open"
        else:
            status = "conditional"
    return status
def get_rights_text(item_json, client):
    """Fetches text describing restrictions on an archival object.

    Args:
        item_json (dict): json for an archival object (with resolved ancestors)
        client: an ASnake client

    Returns:
        string: note content of a conditions governing access that indicates a restriction
    """
    published_restrictions = [
        n for n in item_json.get("notes", [])
        if (n.get("type") == "accessrestrict" and n["publish"])]
    if published_restrictions:
        return ", ".join(
            ", ".join(get_note_text(n, client)) for n in published_restrictions)
    if item_json.get("rights_statements"):
        # Note contents are concatenated with no separator between notes,
        # matching the previous accumulator behavior.
        combined = "".join(
            ", ".join(note["content"])
            for stmnt in item_json["rights_statements"]
            for note in stmnt["notes"])
        return combined if combined else None
    return None
def get_resource_creators(resource):
    """Gets all creators of a resource record and concatenate them into a string
    separated by commas.

    Args:
        resource (dict): resource record data.

    Returns:
        creators (string): comma-separated list of resource creators.
    """
    creator_names = [
        agent.get("_resolved").get('display_name').get('sort_name')
        for agent in (resource.get("linked_agents") or [])
        if agent.get("role") == "creator"]
    return ", ".join(creator_names)
def get_dates(archival_object, client):
    """Gets human-readable dates of an archival object.

    Args:
        archival_object (dict): json for an archival object
        client: an ASnake client

    Returns:
        string: all dates associated with an archival object, separated by a comma
    """
    display_dates = [
        get_date_display(date_json, client)
        for date_json in archival_object.get("dates", [])]
    return ", ".join(display_dates) if display_dates else None
def get_size(instances):
    """Attempts to parse extents from instances.

    Initially, child subcontainers are parsed to determine the extent number
    and extent type. If a child subcontainer does not exist, the parent
    container is parsed. Digital-object instances (no sub_container) are
    skipped. Returns a comma-separated, pluralized summary, e.g. "2 folders".
    """
    def append_to_list(extents, extent_type, extent_number):
        """Merges or appends extent objects to an extent list.

        Args:
            extents (list): a list of extents to update.
            extent_type (str): the extent type to add.
            extent_number (int): the extent number to add
        """
        matching_extents = [e for e in extents if e["extent_type"] == extent_type]
        if matching_extents:
            matching_extents[0]["number"] += extent_number
        else:
            extents.append({"extent_type": extent_type, "number": extent_number})
        return extents

    extents = []
    for instance in [i for i in instances if i.get("sub_container")]:
        try:
            sub_container_parseable = all(i_type in instance.get("sub_container", {}) for i_type in ["indicator_2", "type_2"])
            if sub_container_parseable:
                # Indicators like "3-7" describe a range of child containers.
                number_list = [i.strip() for i in instance["sub_container"]["indicator_2"].split("-")]
                span = sorted(map(indicator_to_integer, number_list))
                extent_type = instance["sub_container"]["type_2"]
                extent_number = span[-1] - span[0] + 1 if len(span) > 1 else 1
            else:
                instance_type = instance["instance_type"].lower()
                sub_container_type = instance["sub_container"]["top_container"]["_resolved"].get("type", "").lower()
                extent_type = "{} {}".format(instance_type, sub_container_type) if sub_container_type != "box" else sub_container_type
                extent_number = 1
            extents = append_to_list(extents, extent_type.strip(), extent_number)
        except Exception as e:
            raise Exception("Error parsing instances") from e
    # Construct the inflect engine once; the original built a new engine for
    # every extent inside the join, which is needlessly expensive.
    engine = inflect.engine()
    return ", ".join(
        ["{} {}".format(
            e["number"], engine.plural(e["extent_type"], e["number"])) for e in extents])
def get_parent_title(obj_json):
    """Returns the title for an object's parent component.
    If a component identifier is present, appends that identifier plus the
    object's level.
    """
    # Fall back to display_string when no title is present.
    base = obj_json.get("title", obj_json.get("display_string")).strip()
    component_id = obj_json.get("component_id")
    if not component_id:
        return base
    return "{}, {} {}".format(base, obj_json["level"].capitalize(), component_id)
def get_url(obj_json, host, client):
    """Returns a full URL for an object."""
    # Deterministic UUID derived from the ArchivesSpace URI.
    uuid = shortuuid.uuid(name=obj_json["uri"])
    section = "collections" if has_children(obj_json, client) else "objects"
    return "{}/{}/{}".format(host, section, uuid)
def has_children(obj_json, client):
"""Checks whether an archival object has children using the tree/node endpoint."""
| |
<filename>pixel_perturbation.py
from torchvision import datasets, transforms, utils, models
from misc_functions import *
from gradcam import grad_cam
from functools import reduce
from saliency.inputgradient import Inputgrad
import gc
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as F
from models.resnet import *
from models.vgg import *
# PATH variables
PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
data_PATH = PATH + 'dataset/'
result_path = PATH + 'results/'
# get unnormalize object for plotting saliency maps
# NOTE(review): rebinds the imported `unnormalize` factory name (from the
# wildcard misc_functions import, presumably) to its returned instance.
unnormalize = unnormalize()
# same transformations for each dataset
transform_standard = transform()
# prevents F.interpolate from random behaviour which caused Cuda memory errors
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# HELPER FUNCTIONS
def print_dict(dictionary, div=False):
    """
    Pretty-print a results dictionary (for debugging purposes).
    :param dictionary: maps gradient-method name to a result tuple
    :param div: if True the tuple holds (kl_div,), otherwise (mean, std)
    :return: None
    """
    for method, stats in dictionary.items():
        print("Gradient method: {}".format(method))
        if div is True:
            print("KL Div: {}".format(stats[0]))
        else:
            print("Mean: {}, Std: {}".format(stats[0], stats[1]))
def kl_div(P, Q):
    """
    Actual KL divergence per row (torch.nn.functional.kl_div returns
    different values, hence this hand-rolled version).
    :param P: discrete distribution, shape (batch, classes)
    :param Q: discrete distribution, same shape
    :return: per-row KL(P || Q)
    """
    ratio = P / Q
    return (P * ratio.log()).sum(dim=1)
def compute_difference_metrics(initial_output, final_output, tmp_results):
    """
    Given NN output before and after removal, all absolute difference metrics are stored.
    :param initial_output: model.forward(x)
    :param final_output: model.forward(x_salient_pixels_removed)
    :param tmp_results: object to store all temporary results; indexable with
        seven list slots in the order: score diffs, prob diffs, KL div,
        topk-other score diffs, topk-other prob diffs, max-other score diffs,
        max-other prob diffs. Each slot gets one rounded entry per batch.
    :return: tmp_results (same object, mutated)
    """
    # get probabilities instead of output scores
    initial_probabilities = F.softmax(initial_output, dim=1)
    final_probabilities = F.softmax(final_output, dim=1)
    # compute kl divergence on probability vectors
    kldiv = kl_div(final_probabilities, initial_probabilities)
    # score differences
    tmp_score_diffs = get_temp_result(initial_output, final_output)
    # prob differences
    tmp_prob_diffs = get_temp_result(initial_probabilities, final_probabilities)
    # changes of mean class score other classes based on topk classes, otherwise division by zero
    tmp_other_diffs = get_other_class_change(initial_output, final_output, ARGS.topk)
    tmp_other_probs_diffs = get_other_class_change(initial_probabilities, final_probabilities, ARGS.topk)
    # max change in other classes than most confident one
    tmp_other_max_diffs = tmp_other_diffs.max(1)[0]
    tmp_other_max_prob_diffs = tmp_other_probs_diffs.max(1)[0]
    # save per image (.tolist() detaches from torch; rounded to 8 decimals)
    tmp_results[0].append(np.round(tmp_score_diffs.tolist(), 8))
    tmp_results[1].append(np.round(tmp_prob_diffs.tolist(), 8))
    tmp_results[2].append(np.round(kldiv.tolist(), 8))
    tmp_results[3].append(np.round(tmp_other_diffs.tolist(), 8))
    tmp_results[4].append(np.round(tmp_other_probs_diffs.tolist(), 8))
    tmp_results[5].append(np.round(tmp_other_max_diffs.tolist(), 8))
    tmp_results[6].append(np.round(tmp_other_max_prob_diffs.tolist(), 8))
    return tmp_results
def initialize_grad_and_model(grad_type, model_name, device):
    """
    Given a saliency method (grad_type), the exact modelname (vgg16, vgg16_bn, ...)
    and the device, returns the model and the matching grad object.
    :param grad_type: one of 'gradcam', 'fullgrad', 'inputgrad'
    :param model_name:
    :param device:
    :return: (model, grad) — grad is None for an unrecognised grad_type
    """
    if grad_type == 'gradcam':
        # Gradcam ships its own model initialization.
        return initialize_grad_cam(model_name, device)
    # Same model for Inputgrad and Fullgrad, different grad object.
    model = initialize_fullgrad(model_name, device)
    grad = None
    if grad_type == "fullgrad":
        grad = FullGrad(model)
    elif grad_type == "inputgrad":
        grad = Inputgrad(model)
    return model, grad
def get_temp_result(initial_out, final_out):
    """
    Takes NN outputs before and after removal to compute absolute fractional
    differences of each image's initially most confident class score.
    :param initial_out: output before perturbation, shape (batch, classes)
    :param final_out: output after perturbation, same shape
    :return: per-image tensor |final - initial| / initial for the class that
        was most confident before perturbation
    """
    # initially most confident class (per image)
    initial_class_scores, predicted_class = initial_out.max(1)
    # Score of that same class after modification.
    # BUG FIX: the previous index_select(1, ...).max(0)[0] took a maximum
    # across the whole batch, which is only correct for batch size 1;
    # gather picks each row's own predicted-class score.
    final_class_scores = final_out.gather(1, predicted_class.unsqueeze(1)).squeeze(1)
    # absolute fractional difference of raw results
    tmp_result = abs(final_class_scores - initial_class_scores) / initial_class_scores
    return tmp_result
def get_other_class_change(initial_out, final_out, topk):
    """
    Absolute fractional change of the topk classes other than the class that
    was most confident before removal.
    :param initial_out: output before perturbation
    :param final_out: output after perturbation
    :param topk: number of "other" classes to track
    :return: tensor of shape (batch, topk)
    """
    before = get_topk_other_scores(initial_out, initial_out, topk)
    after = get_topk_other_scores(initial_out, final_out, topk)
    # absolute fractional difference of raw results
    return abs(before - after) / before
def get_other_classes_scores(initial_out, final_out):
    """
    Takes NN output before and after removal, removes the class that was most
    confident before removal, and returns the remaining class scores.
    :param initial_out: output before perturbation, shape (batch, num_classes)
    :param final_out: output after perturbation, same shape
    :return: float32 tensor of shape (batch, num_classes - 1), each row with
        its initially-predicted class removed
    """
    # most confident class before removal
    _, predicted_class = initial_out.max(1)
    batch_size, num_classes = initial_out.size()
    # Boolean mask that is False exactly at each row's predicted class.
    # GENERALIZATION/FIX: replaces a per-row np.delete loop that hard-coded
    # 1000 ImageNet classes; now works for any number of classes.
    keep = torch.ones_like(initial_out, dtype=torch.bool)
    keep[torch.arange(batch_size), predicted_class] = False
    # Boolean indexing flattens row-major, so reshape restores row order.
    return final_out[keep].reshape(batch_size, num_classes - 1).float()
def get_topk_other_scores(initial_out, final_out, topk):
    """
    Takes NN output before and after removal, finds the topk "other" classes
    (excluding the initially most confident one) and returns their
    after-removal scores.
    :param initial_out: output before perturbation
    :param final_out: output after perturbation
    :param topk: number of other classes to keep
    :return: tensor of shape (batch, topk)
    """
    other_initial_scores = get_other_classes_scores(initial_out, initial_out)
    other_final_scores = get_other_classes_scores(initial_out, final_out)
    _, topk_ind = torch.topk(other_initial_scores, k=topk)
    # gather replaces the previous per-row Python loop: for each image, pick
    # the after-removal scores at the positions of its topk before-removal
    # scores.
    return other_final_scores.gather(1, topk_ind)
def get_max_other_class_change(initial_output, final_output):
    """
    Takes NN output before and after removal, and computes max ab fract change of the most confident of the topk classes
    other than the initially most confident class.
    :param initial_output: output before perturbation
    :param final_output: output after perturbation
    :return: result of Tensor.max(1) — see note below
    """
    other_initial_scores = get_other_classes_scores(initial_output, initial_output)
    other_final_scores = get_other_classes_scores(initial_output, final_output)
    changes = abs(other_initial_scores - other_final_scores) / other_initial_scores
    # NOTE(review): .max(1) returns a (values, indices) tuple, unlike the
    # .max(1)[0] pattern used elsewhere in this file — confirm that callers
    # really want the tuple and not just the values.
    tmp_result = changes.max(1)
    return tmp_result
def append_mean_std(tmp_results, means, stds):
    """
    Append this batch's mean and std (rounded to 8 decimals) to the lists
    that track statistics for each k value.
    :param tmp_results: array-like batch results
    :param means: running list of means (mutated)
    :param stds: running list of stds (mutated)
    :return: None
    """
    batch_mean = np.mean(tmp_results)
    batch_std = np.std(tmp_results)
    means.append(np.round(batch_mean, 8))
    stds.append(np.round(batch_std, 8))
def plot_all_grads(results_dict, filename=None):
    """
    Plots all metrics for one specification (model and which kind of removal)
    :param results_dict: maps gradient-method name to (means, stds) per k value
    :param filename: output path stem; saving is currently commented out
    :return: None (shows the figure)
    """
    plt.figure()
    axes = plt.gca()
    # axes.set_xlim([0, ARGS.k[-1]*100])
    axes.set_xlabel('% pixels removed')
    axes.set_ylabel('Absolute fractional output change')
    # ARGS.k holds fractions of removed pixels; display them as percentages.
    x_labels = [i * 100 for i in ARGS.k]
    for key, v in results_dict.items():
        # v[0] are the means, v[1] the stds; shade a one-std band around the mean.
        plt.plot(ARGS.k, np.array(v[0]), linewidth=1.2, label=str(key))
        plt.fill_between(ARGS.k, np.array(v[0]) - np.array(v[1]), np.array(v[0]) + np.array(v[1]), alpha=1 / 3)
    plt.xticks(ARGS.k, x_labels, rotation=45)
    plt.tight_layout()
    plt.legend()
    #plt.savefig(filename + ".png")
    plt.show()
def initialize_grad_cam(model_name, device, pretrained=True):
    """
    Gradcam needs the original torchvision model object to work on, hence a
    different initialization path than fullgrad.
    :param model_name: torchvision model name (looked up in models.__dict__)
    :param device:
    :param pretrained:
    :return: (model, GradCAM object)
    """
    factory = models.__dict__[model_name]
    model = factory(pretrained=pretrained)
    model.to(device)
    model.eval()
    return model, grad_cam.GradCAM(model=model)
def initialize_fullgrad(model_name, device):
    """
    Initializes Fullgrad Object given modelname and device.
    :param model_name: name of a model constructor in scope (via the
        wildcard imports from models.resnet / models.vgg)
    :param device:
    :return: the model in eval mode on the given device
    """
    # SECURITY NOTE(review): eval() on model_name executes arbitrary code if
    # the name ever comes from untrusted input; a dict lookup of the model
    # constructors would be safer.
    model = eval(model_name)(pretrained=True)
    model = model.to(device)
    model.eval()
    return model
def compute_saliency_per_grad(grad_type, grad, data, target_layer, target_class=None):
    """
    Given the grad_type, computes saliency maps for a batch of images.
    :param grad_type: 'fullgrad', 'inputgrad' or 'gradcam'
    :param grad: the gradient object matching grad_type
    :param data: batch of input images
    :param target_layer: vgg: features, resnet: layer4
    :param target_class: to visualize wrong predictions take original target
    :return: saliency maps, or None for an unrecognised grad_type
    """
    if grad_type in ("fullgrad", "inputgrad"):
        return grad.saliency(data, target_class=target_class)
    if grad_type == "gradcam":
        probs, ids = grad.forward(data)
        # Grad-CAM: backprop from the most confident class of each image.
        grad.backward(ids=ids[:, [0]])
        return grad.generate(target_layer=target_layer)
    return None
def get_filename(result_path, grad_type, index):
    """Build the output path '<result_path>/<grad_type>_<model><model_type>_<index>.png'."""
    model_name = "{}{}".format(ARGS.model, ARGS.model_type)
    return "{}/{}_{}_{}.png".format(result_path, grad_type, model_name, index)
def save_saliency_map_batch(saliency, data, result_path, grad_type, index):
    """
    Save all saliency maps of a batch of images.
    :param saliency: batch of saliency maps, one per image
    :param data: batch of normalized input images; indexed as (N, 3, 224, 224)
    :param result_path: directory for the output images
    :param grad_type: saliency method name, used in the file name
    :param index: batch index, used in the file name
    :return: None
    """
    for i in range(len(data)):
        # undo dataset normalization for display
        im = unnormalize(data[i, :, :, :].cpu())
        im = im.view(1, 3, 224, 224)[-1, :, :, :]
        reg = saliency[i, :, :, :]
        # NOTE(review): the filename depends only on the batch index, not on i,
        # so every image in the batch writes to the same path — confirm intended.
        filename = get_filename(result_path, grad_type, index)
        # print("filename:{}".format(filename))
        save_saliency_map(im, reg, filename)
def print_memory():
    """
    Print all live (CUDA) tensors plus an estimate of the CUDA memory they
    occupy, for debugging memory leakage issues.
    :return: None
    """
    total_bits = 0
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                print(type(obj), obj.size())
                if len(obj.size()) > 0:
                    # Accumulate size in bits: 32 for float/int, 64 for long.
                    numel = reduce(lambda x, y: x * y, obj.size())
                    if obj.type() == 'torch.cuda.FloatTensor':
                        total_bits += numel * 32
                    elif obj.type() == 'torch.cuda.LongTensor':
                        total_bits += numel * 64
                    elif obj.type() == 'torch.cuda.IntTensor':
                        total_bits += numel * 32
        except Exception:
            # Some gc-tracked objects raise on attribute access; skip them.
            pass
    print("{} GB".format(total_bits / ((1024 ** 3) * 8)))
def get_sample_loader():
    """Build a DataLoader over the ImageFolder dataset at module-level data_PATH.

    Uses the module-level transform_standard and ARGS.batch_size; shuffling is
    disabled.
    """
    dataset = data_PATH
    sample_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(dataset, transform=transform_standard),
        batch_size=ARGS.batch_size, shuffle=False)
    return sample_loader
def get_salient_type():
    """Map the ARGS.most_salient flag (a string) to 'most' or 'least'."""
    return "most" if ARGS.most_salient == "True" else "least"
def print_results(all_results):
    """Print every difference metric collected in all_results (seven slots,
    in the same order as compute_difference_metrics fills them)."""
    print("For K values: {}".format(ARGS.k))
    headers = [
        "############ Score absolute fractional differences ############",
        "############ Probs absolute fractional differences ############",
        "KL divergences per k",
        "############ Top: {} Other Score absolute fractional differences ############".format(ARGS.topk),
        "############ Top: {} Other Probs absolute fractional differences ############".format(ARGS.topk),
        "############ Max Other Score absolute fractional differences ############",
        "############ Max other Probs absolute fractional differences ############",
    ]
    for header, results in zip(headers, all_results):
        print(header)
        print_dict(results)
def initialize_means_std_dict():
means_std_dict= {}
means_std_dict["score_means"] = []
means_std_dict["score_stds"] = []
means_std_dict["prob_means"] = []
means_std_dict["prob_stds"] = []
means_std_dict["kl_div_means"] = []
means_std_dict["kl_div_stds"] = []
means_std_dict["other_score_means"] = []
means_std_dict["other_score_stds"] = []
means_std_dict["other_prob_means"] = []
means_std_dict["other_prob_stds"] = []
means_std_dict["other_max_score_means"] = | |
<reponame>gefux/OQuPy
# Copyright 2022 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module on for the original time evolving matrix product operator (TEMPO)
algorithm. This module is based on [Strathearn2017] and [Strathearn2018].
**[Strathearn2017]**
<NAME>, <NAME>, and <NAME>, *Efficient real-time path integrals
for non-Markovian spin-boson models*. New Journal of Physics, 19(9),
p.093009 (2017).
**[Strathearn2018]**
<NAME>, <NAME>, <NAME>, <NAME> and
<NAME>, *Efficient non-Markovian quantum dynamics using
time-evolving matrix product operators*, Nat. Commun. 9, 3322 (2018).
"""
import sys
from typing import Callable, Dict, Optional, Text
import warnings
from copy import copy
import numpy as np
from numpy import ndarray
from scipy.linalg import expm
from oqupy.correlations import BaseCorrelations
from oqupy.bath import Bath
from oqupy.base_api import BaseAPIClass
from oqupy.config import NpDtype, MAX_DKMAX, DEFAULT_TOLERANCE
from oqupy.config import TEMPO_BACKEND_CONFIG
from oqupy.dynamics import Dynamics
from oqupy.system import BaseSystem
from oqupy.tempo.backends.tempo_backend import TempoBackend
from oqupy.operators import commutator, acommutator
from oqupy.util import get_progress
class TempoParameters(BaseAPIClass):
    r"""
    Parameters for the TEMPO computation.
    Parameters
    ----------
    dt: float
        Length of a time step :math:`\delta t`. - It should be small enough
        such that a trotterisation between the system Hamiltonian and the
        environment it valid, and the environment auto-correlation function
        is reasonably well sampled.
    dkmax: int
        Number of time steps :math:`K\in\mathbb{N}` that should be included in
        the non-Markovian memory. - It must be large
        enough such that :math:`\delta t \times K` is larger than the
        necessary memory time :math:`\tau_\mathrm{cut}`.
    epsrel: float
        The maximal relative error in the singular value truncation (done
        in the underlying tensor network algorithm). - It must be small enough
        such that the numerical compression (using tensor network algorithms)
        does not truncate relevant correlations.
    add_correlation_time: float
        Additional correlation time to include in the last influence
        functional as explained in [Strathearn2017].
    name: str (default = None)
        An optional name for the tempo parameters object.
    description: str (default = None)
        An optional description of the tempo parameters object.
    """
    def __init__(
            self,
            dt: float,
            dkmax: int,
            epsrel: float,
            add_correlation_time: Optional[float] = None,
            name: Optional[Text] = None,
            description: Optional[Text] = None) -> None:
        """Create a TempoParameters object."""
        # Each assignment below goes through the validating property setters.
        self.dt = dt
        self.dkmax = dkmax
        self.epsrel = epsrel
        self.add_correlation_time = add_correlation_time
        super().__init__(name, description)
    def __str__(self) -> Text:
        ret = []
        ret.append(super().__str__())
        ret.append(" dt = {} \n".format(self.dt))
        ret.append(" dkmax = {} \n".format(self.dkmax))
        ret.append(" epsrel = {} \n".format(self.epsrel))
        ret.append(" add_correlation_time = {} \n".format(
            self.add_correlation_time))
        return "".join(ret)
    @property
    def dt(self) -> float:
        """Length of a time step."""
        return self._dt
    @dt.setter
    def dt(self, new_dt: float) -> None:
        # Validation deliberately raises AssertionError (module convention).
        try:
            tmp_dt = float(new_dt)
        except Exception as e:
            raise AssertionError("Argument 'dt' must be float.") from e
        assert tmp_dt > 0.0, \
            "Argument 'dt' must be bigger than 0."
        self._dt = tmp_dt
    @property
    def dkmax(self) -> float:
        """Number of time steps that should be included in the non-Markovian
        memory. """
        return self._dkmax
    @dkmax.setter
    def dkmax(self, new_dkmax: float) -> None:
        try:
            if new_dkmax is None:
                tmp_dkmax = None
            else:
                tmp_dkmax = int(new_dkmax)
        except Exception as e:
            raise AssertionError("Argument 'dkmax' must be int or None.") \
                from e
        # NOTE(review): the check enforces dkmax > 0 (or None) but the message
        # says "bigger than or equal to 0" — one of the two is off; confirm.
        assert tmp_dkmax is None or tmp_dkmax > 0, \
            "Argument 'dkmax' must be bigger than or equal to 0 or None."
        self._dkmax = tmp_dkmax
    @dkmax.deleter
    def dkmax(self) -> None:
        # Deleting dkmax means "no memory cutoff" (None).
        self._dkmax = None
    @property
    def epsrel(self) -> float:
        """The maximal relative error in the singular value truncation."""
        return self._epsrel
    @epsrel.setter
    def epsrel(self, new_epsrel: float) -> None:
        try:
            tmp_epsrel = float(new_epsrel)
        except Exception as e:
            raise AssertionError("Argument 'epsrel' must be float.") from e
        assert tmp_epsrel > 0.0, \
            "Argument 'epsrel' must be bigger than 0."
        self._epsrel = tmp_epsrel
    @property
    def add_correlation_time(self) -> float:
        """
        Additional correlation time to include in the last influence
        functional.
        """
        return self._add_correlation_time
    @add_correlation_time.setter
    def add_correlation_time(self, new_tau: Optional[float] = None) -> None:
        if new_tau is None:
            del self.add_correlation_time
        else:
            # check input: cutoff
            try:
                tmp_new_tau = float(new_tau)
            except Exception as e:
                raise AssertionError( \
                    "Additional correlation time must be a float.") from e
            if tmp_new_tau < 0:
                raise ValueError(
                    "Additional correlation time must be non-negative.")
            self._add_correlation_time = tmp_new_tau
    @add_correlation_time.deleter
    def add_correlation_time(self) -> None:
        self._add_correlation_time = None
class Tempo(BaseAPIClass):
"""
Class representing the entire TEMPO tensornetwork as introduced in
[Strathearn2018].
Parameters
----------
system: BaseSystem
The system.
bath: Bath
The Bath (includes the coupling operator to the system).
parameters: TempoParameters
The parameters for the TEMPO computation.
initial_state: ndarray
The initial density matrix of the system.
start_time: float
The start time.
backend: str (default = None)
The name of the backend to use for the computation. If
`backend` is ``None`` then the default backend is used.
backend_config: dict (default = None)
The configuration of the backend. If `backend_config` is
``None`` then the default backend configuration is used.
name: str (default = None)
An optional name for the tempo object.
description: str (default = None)
An optional description of the tempo object.
"""
def __init__(
self,
system: BaseSystem,
bath: Bath,
parameters: TempoParameters,
initial_state: ndarray,
start_time: float,
backend_config: Optional[Dict] = None,
name: Optional[Text] = None,
description: Optional[Text] = None) -> None:
"""Create a Tempo object. """
assert isinstance(system, BaseSystem), \
"Argument 'system' must be an instance of BaseSystem."
self._system = system
assert isinstance(bath, Bath), \
"Argument 'bath' must be an instance of Bath."
self._bath = bath
self._correlations = self._bath.correlations
assert isinstance(parameters, TempoParameters), \
"Argument 'parameters' must be an instance of TempoParameters."
self._parameters = parameters
try:
tmp_initial_state = np.array(initial_state, dtype=NpDtype)
tmp_initial_state.setflags(write=False)
except Exception as e:
raise AssertionError("Initial state must be numpy array.") from e
assert len(tmp_initial_state.shape) == 2, \
"Initial state is not a matrix."
assert tmp_initial_state.shape[0] == \
tmp_initial_state.shape[1], \
"Initial state is not a square matrix."
self._initial_state = tmp_initial_state
self._dimension = self._initial_state.shape[0]
try:
tmp_start_time = float(start_time)
except Exception as e:
raise AssertionError("Start time must be a float.") from e
self._start_time = tmp_start_time
if backend_config is None:
self._backend_config = TEMPO_BACKEND_CONFIG
else:
self._backend_config = backend_config
assert self._bath.dimension == self._dimension and \
self._system.dimension == self._dimension, \
"Hilbertspace dimensions are unequal: " \
+ "system ({}), ".format(self._system.dimension) \
+ "initial state ({}), ".format(self._dimension) \
+ "and bath coupling ({}), ".format(self._bath.dimension)
super().__init__(name, description)
tmp_coupling_comm = commutator(self._bath._coupling_operator)
tmp_coupling_acomm = acommutator(self._bath._coupling_operator)
self._coupling_comm = tmp_coupling_comm.diagonal()
self._coupling_acomm = tmp_coupling_acomm.diagonal()
self._dynamics = None
self._backend_instance = None
self._init_tempo_backend()
def _init_tempo_backend(self):
"""Create and initialize the tempo backend. """
dim = self._dimension
initial_state = self._initial_state.reshape(dim**2)
influence = self._influence
unitary_transform = self._bath.unitary_transform
propagators = self._propagators
sum_north = np.array([1.0]*(dim**2))
sum_west = np.array([1.0]*(dim**2))
dkmax = self._parameters.dkmax
epsrel = self._parameters.epsrel
self._backend_instance = TempoBackend(
initial_state,
influence,
unitary_transform,
propagators,
sum_north,
sum_west,
dkmax,
epsrel,
config=self._backend_config)
def _init_dynamics(self):
"""Create a Dynamics object with metadata from the Tempo object. """
name = None
description = "computed from '{}' tempo".format(self.name)
self._dynamics = Dynamics(name=name,
description=description)
def _influence(self, dk: int):
"""Create the influence functional matrix for a time step distance
of dk. """
return influence_matrix(
dk,
parameters=self._parameters,
correlations=self._correlations,
coupling_acomm=self._coupling_acomm,
coupling_comm=self._coupling_comm)
def _propagators(self, step: int):
"""Create the system propagators (first and second half) for the time
step `step`. """
dt = self._parameters.dt
t = self._time(step)
first_step = expm(self._system.liouvillian(t+dt/4.0)*dt/2.0)
second_step = expm(self._system.liouvillian(t+dt*3.0/4.0)*dt/2.0)
return first_step, second_step
def _time(self, step: int):
"""Return the time that corresponds to the time step `step`. """
return self._start_time + float(step)*self._parameters.dt
@property
def dimension(self) -> ndarray:
"""Hilbert space dimension. """
return copy(self._dimension)
def compute(
self,
end_time: float,
progress_type: Text = None) -> Dynamics:
"""
Propagate (or continue to propagate) the TEMPO tensor network to
time `end_time`.
Parameters
----------
end_time: float
The time to which the TEMPO should be computed.
progress_type: str (default = None)
The progress report type during the computation. Types are:
{``'silent'``, ``'simple'``, ``'bar'``}. If `None` then
the default progress type is used.
Returns
-------
dynamics: Dynamics
The instance of Dynamics associated with the TEMPO object.
"""
try:
tmp_end_time = float(end_time)
except Exception as e:
raise AssertionError("End time must be | |
"""Find template-based model files, read columns, convert them to n-dim hypercubes, store as HDF5.
"""
import os, sys, re, warnings
import numpy as N
import h5py
#from externals.padarray import padarray
import filefuncs
__author__ = "<NAME> <<EMAIL>>"
__version__ = "20180921"
# TODO: add simple logging
class HdfFile:
    """Thin wrapper around an h5py.File adding dataset/attribute helpers."""

    def __init__(self, hdffile, mode='a'):
        """Store the path and mode, then open the HDF5 file immediately.

        Parameters:
        -----------
        hdffile : str
            Path to the HDF5 file.
        mode : str
            h5py file mode (default 'a': read/write, create if missing).
        """
        self.hdffile = hdffile
        self.mode = mode
        self.open()

    def open(self):
        """Open the HDF5 file, keeping the handle in self.hdf."""
        try:
            self.hdf = h5py.File(self.hdffile, self.mode)
        except Exception:
            print("Problem opening HDF5 file %s with mode %s" % (self.hdffile, self.mode))
            raise

    def close(self):
        """Close the HDF5 file (best effort; a failure is only reported)."""
        try:
            self.hdf.close()
        except Exception:
            print("Problem closing HDF5 file %s" % self.hdffile)

    def provide_dataset(self, name, shape, dtype='float32', compression=False):
        """Open a dataset (possibly in a group), and return the handle.
        Parameters:
        -----------
        name : str
            Full name qualifier for the dataset within the HDF5 file
            hierarchy. Can be the leaf on a tree. Example:
            name = 'foo'
            name = '/foo'
            name = 'group1/group2/foo'
        shape : tuple
            Shape of the dataset.
        dtype : str
            Element dtype (default 'float32').
        compression : bool
            When not False, use gzip level-9 compression.
        """
        # create (or re-open) dataset
        if compression is False:
            dataset = self.hdf.require_dataset(name, shape=shape, dtype=dtype)
        else:
            dataset = self.hdf.require_dataset(name, shape=shape, dtype=dtype,
                                               compression='gzip', compression_opts=9)
        return dataset

    def update_attrs(self, groupname, obj, attrs, values):
        """Placeholder for updating attributes in a group (currently prints only).

        BUG FIX: the parameter was named 'attr' while the loop iterated over
        'attrs', so every call raised NameError. 'obj' and 'values' are still
        unused — the update logic appears unfinished.
        """
        print("Groupname: ", groupname)
        group = self.hdf.require_group(groupname)
        for j, attr in enumerate(attrs):
            print(" Attribute: ", attr)

    def store_attrs(self, groupname, obj, attrs, compression=False, dtype=None):
        """Store attributes of an object as datasets within a group in the hdf5 file.
        Parameters:
        -----------
        groupname : str
            Name of the group in the hdf5 file. If the group already
            exists, it will be opened for access. Otherwise it will be
            created.
        obj : instance
            An object whose members (at least some of them) are
            supposed to be saved to the group. For example:
            obj.attribute1, obj.foobar, etc.
        attrs : seq of strings
            The sequence of attribute names of obj, which should be
            stored in the hdf5 file (under groupname). Only the
            attributes listed in 'attrs' will be stored.
        compression : bool
            When not False, store non-ragged values gzip-compressed.
        dtype : str or None
            Optional element dtype passed to h5py.
        """
        print("Groupname: ", groupname)
        group = self.hdf.require_group(groupname)
        for attr in attrs:
            print(" Attribute: ", attr)
            if attr in group:
                print(" Dataset '%s' already exists in group '%s'. Not touching it, continuing." % (attr, groupname))
            else:
                value = getattr(obj, attr)
                # Ragged arrays (unequal row lengths) need per-row writes.
                if isinstance(value, N.ndarray) and self.isragged(value):
                    # BUG FIX: was 'h.create_dataset' — 'h' was never defined.
                    dataset = group.create_dataset(attr, (len(value),), dtype=dtype)
                    for j, v in enumerate(value):
                        dataset[j] = v
                else:
                    if compression is False:
                        dataset = group.create_dataset(attr, data=value, dtype=dtype)
                    else:
                        dataset = group.create_dataset(attr, data=value, compression='gzip',
                                                       compression_opts=9, dtype=dtype)

    @staticmethod
    def isragged(arr):
        """Test if an array is ragged (i.e. unequal-length records).

        Declared static (BUG FIX) so the self.isragged(...) call in
        store_attrs works; as a plain one-argument method, the implicit
        'self' made that call a TypeError.

        Parameters
        ----------
        arr : array

        Returns
        -------
        ragged : bool
            True if not all entries in arr are of the same length.
        """
        return not (N.unique([len(r) for r in arr]).size == 1)
class Hypercubes:
def __init__(self,rootdir,pattern,hdffile,mode='ram',memgigs=2.,hypercubenames=None,func='asciitable',compression=False,**kwargs):
"""Parameters:
-----------
rootdir : str
Path to the top-level directory holding the files to be
read.
pattern : str
File name pattern to match. See
get_pattern_and_paramnames() docstring for details.
hdffile : str
The hdf5 output file name where to store the hypercubes
and meta information.
mode : str
Either 'ram' (default) or 'disk'. Determines whether
hypercubes will be first built in RAM and then stored to
file in a final flush, or directly in an opened HDF5 file
(on disk). In 'ram' mode the creation is faster, but
limited by the available RAM on the system (but see
'memgigs'). 'disk' mode is slower, but the RAM usage is
negligible.
memgigs : float
If mode is 'ram', then 'memgigs' is the total size of RAM
(in GB) that the system is allowed to use to create the
hypercubes in-memory. Default is 2 GB. The total size of
RAM to be used is estimated before creating the
hypercubes. If it exceeds 'memgigs', an exception is
raised. Note that no checks are performed about
available/free RAM, so please use with caution.
hypercubenames : seq of strings
A list of names for the hypercubes to be stored.
func : str
The name of the function that will read individual files
in rootdir. The function should be provided in file
filefuncs.py
"""
self.hdffile = hdffile
self.hdf = HdfFile(self.hdffile,mode='a')
self.mode = mode
self.memgigs = memgigs
mykwargs = kwargs
mykwargs['hypercubenames'] = hypercubenames
# self.hypercubenames = hypercubenames
self.func = getattr(filefuncs,func)
self.funcname = self.func.func_name
# regex pattern, number of values, extracted parameter names (if any)
self.pattern, self.Nparam, self.paramnames = get_pattern_and_paramnames(pattern)
self.rootdir = rootdir
# get a list of all files under rootdir
files = get_files_in_dir(self.rootdir,verbose=False,returnsorted=True)
# match all files to pattern, keep only the matching ones, and a list of matched numerical values
self.matched_files, self.matched_values = match_pattern_to_strings(files,pattern=self.pattern,op=os.path.basename,progress=True)
# turn the list of all matched values (per file) into lists of unique values (per parameter)
theta_strings, self.theta, self.hypercubeshape = get_uniques(self.matched_values,returnnumerical=True)
# return hypercubes, but also update: theta, paramnames
self.hypercubes = self.convert(self.matched_files,self.matched_values,compression=compression,**mykwargs)
# As described in issue #5 on bitbucket, we need to work
# around a numpy and/or h5py bug. That's why we nan-pad
# self.theta, and store as a regular 2-d array (self.theta.pad)
#PA self.theta = padarray.PadArray(self.theta) # has members .pad (2-d array) and .unpad (list of 1-d arrays)
self.store2hdf(compression=compression)
# self.sanity()
print("Closing hdf5 file.")
self.hdf.close()
# def sanity(self):
#
# assert (len(set([h.shape for h in self.hypercubes])) == 1), "Not all cubes in 'hypercubes' have the same shape."
#
# if self.hypercubenames is not None:
# assert (len(self.hypercubenames) == self.Nhypercubes),\
# "The number of hypercube names given in 'hypercubenames' (%d) must be equal to the number of hypercubes (%d)." % (len(self.hypercubenames),self.Nhypercubes)
# else:
# self.hypercubenames = ['hc' + '%02d' % j for j in xrange(self.Nhypercubes)] # generic hypercube names, if none provided
def convert(self,files,values,compression=False,**kwargs):
"""Load specified columns from all pattern-matched files, and store
them in a list of n-dimensional hypercubes, each properly shaped
according to the matched parameter values.
Given the set of unique parameter values (for every
parameter), and the set of parameter values matched for a
given file, we can determine the n-dimensional position the
the current parameter values, i.e. where in the hypercube to
put data read from that file.
The indices of columns to read from every matched file are
taken from self.cols.
We have to do these somewhat complicated antics because we can
not rely on the correct sorting order of the files (wrt to the
parameter values they represent). If we could, we would simply
concatenate the numbers from all files, and then reshape them
to the n-dimensional hypercube using the known unique
parameter values sets.
Parameters
----------
files : list
List of path to all files that have been pattern-matched
for the parameter values matched in their file names.
values : list
List of parameter values match in each file in 'files'.
"""
# check in the first files how many rows there are to read
datasets, axnames, axvals, hypercubenames = self.func(files[0],**kwargs) # kwargs are: cols, e.g.: cols=(0,(1,2,3)), xcol are the values of the column given by kwarg 'xcol'
self.hypercubenames = hypercubenames
# how many cubes?
self.Nhypercubes = len(datasets)
# one dataset has hoe many dimensions?
ndim = datasets[0].ndim
self.axnames = axnames
self.axvals = axvals
if self.axnames is None:
self.axnames = ['ax%02d' % j for j in range(ndim)]
if self.axvals is None:
self.axvals = [N.arange(axissize) for axissize in datasets[0].shape] # this explictly assumes that all datasets are of same shape!
# extend hypercube shape by whatever the returned shape of datasets is
self.hypercubeshape = self.hypercubeshape + list(datasets[0].shape)
# prepare a list of n-dimensional hypercubes to hold the
# re-shaped column data (one hypercube per column read)
if self.mode == 'ram':
ramneeded = self.Nhypercubes * N.prod(self.hypercubeshape) * 4. / 1024.**3 # dataset size in GB, assuming float32
assert (ramneeded <= self.memgigs),\
"Mode 'ram' selected. Required RAM (%.3f) exceeds permitted RAM (%.3f). Check 'memgigs' parameter, or use mode='disk'." % (ramneeded,self.memgigs)
# careful not to reference the same physical array n times
hypercubes = [N.zeros(shape=self.hypercubeshape,dtype=N.float32) for j in range(self.Nhypercubes)]
elif self.mode == 'disk':
hypercubes = [self.hdf.provide_dataset(self.hypercubenames[j]+'/hypercube',self.hypercubeshape,dtype='float32',compression=compression) for j in range(self.Nhypercubes)]
nvalues = float(len(values))
# LOOP OVER GOOD FILES
print("Converting matched models | |
8, 9, 0)])
def testDailyByMonthAndNWeekDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 8, 9, 0)])
def testDailyByMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 2, 3, 9, 0),
datetime(1998, 3, 3, 9, 0)])
def testDailyByMonthAndMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 3, 3, 9, 0),
datetime(2001, 3, 1, 9, 0)])
def testDailyByYearDay(self):
self.assertEqual(list(rrule(DAILY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testDailyByYearDayNeg(self):
self.assertEqual(list(rrule(DAILY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testDailyByMonthAndYearDay(self):
self.assertEqual(list(rrule(DAILY,
count=4,
bymonth=(1, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 1, 1, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testDailyByMonthAndYearDayNeg(self):
self.assertEqual(list(rrule(DAILY,
count=4,
bymonth=(1, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 1, 1, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testDailyByWeekNo(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 5, 11, 9, 0),
datetime(1998, 5, 12, 9, 0),
datetime(1998, 5, 13, 9, 0)])
def testDailyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may be in the last year.
self.assertEqual(list(rrule(DAILY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 29, 9, 0),
datetime(1999, 1, 4, 9, 0),
datetime(2000, 1, 3, 9, 0)])
def testDailyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self.assertEqual(list(rrule(DAILY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1998, 12, 27, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testDailyByWeekNoAndWeekDayLast(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1999, 1, 3, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testDailyByWeekNoAndWeekDay53(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 12, 28, 9, 0),
datetime(2004, 12, 27, 9, 0),
datetime(2009, 12, 28, 9, 0)])
def testDailyByEaster(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 12, 9, 0),
datetime(1999, 4, 4, 9, 0),
datetime(2000, 4, 23, 9, 0)])
def testDailyByEasterPos(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 13, 9, 0),
datetime(1999, 4, 5, 9, 0),
datetime(2000, 4, 24, 9, 0)])
def testDailyByEasterNeg(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 11, 9, 0),
datetime(1999, 4, 3, 9, 0),
datetime(2000, 4, 22, 9, 0)])
def testDailyByHour(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0),
datetime(1997, 9, 3, 6, 0),
datetime(1997, 9, 3, 18, 0)])
def testDailyByMinute(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6),
datetime(1997, 9, 2, 9, 18),
datetime(1997, 9, 3, 9, 6)])
def testDailyBySecond(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0, 6),
datetime(1997, 9, 2, 9, 0, 18),
datetime(1997, 9, 3, 9, 0, 6)])
def testDailyByHourAndMinute(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6),
datetime(1997, 9, 2, 18, 18),
datetime(1997, 9, 3, 6, 6)])
def testDailyByHourAndSecond(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0, 6),
datetime(1997, 9, 2, 18, 0, 18),
datetime(1997, 9, 3, 6, 0, 6)])
def testDailyByMinuteAndSecond(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6, 6),
datetime(1997, 9, 2, 9, 6, 18),
datetime(1997, 9, 2, 9, 18, 6)])
def testDailyByHourAndMinuteAndSecond(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6, 6),
datetime(1997, 9, 2, 18, 6, 18),
datetime(1997, 9, 2, 18, 18, 6)])
def testDailyBySetPos(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byhour=(6, 18),
byminute=(15, 45),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 15),
datetime(1997, 9, 3, 6, 45),
datetime(1997, 9, 3, 18, 15)])
def testHourly(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 2, 10, 0),
datetime(1997, 9, 2, 11, 0)])
def testHourlyInterval(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 2, 11, 0),
datetime(1997, 9, 2, 13, 0)])
def testHourlyIntervalLarge(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
interval=769,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 10, 4, 10, 0),
datetime(1997, 11, 5, 11, 0)])
def testHourlyByMonth(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0),
datetime(1998, 1, 1, 1, 0),
datetime(1998, 1, 1, 2, 0)])
def testHourlyByMonthDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 3, 0, 0),
datetime(1997, 9, 3, 1, 0),
datetime(1997, 9, 3, 2, 0)])
def testHourlyByMonthAndMonthDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 5, 0, 0),
datetime(1998, 1, 5, 1, 0),
datetime(1998, 1, 5, 2, 0)])
def testHourlyByWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 2, 10, 0),
datetime(1997, 9, 2, 11, 0)])
def testHourlyByNWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 2, 10, 0),
datetime(1997, 9, 2, 11, 0)])
def testHourlyByMonthAndWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0),
datetime(1998, 1, 1, 1, 0),
datetime(1998, 1, 1, 2, 0)])
def testHourlyByMonthAndNWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0),
datetime(1998, 1, 1, 1, 0),
datetime(1998, 1, 1, 2, 0)])
def testHourlyByMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0),
datetime(1998, 1, 1, 1, 0),
datetime(1998, 1, 1, 2, 0)])
def testHourlyByMonthAndMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 0, 0),
datetime(1998, 1, 1, 1, 0),
datetime(1998, 1, 1, 2, 0)])
def testHourlyByYearDay(self):
self.assertEqual(list(rrule(HOURLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 0, 0),
datetime(1997, 12, 31, 1, 0),
datetime(1997, 12, 31, 2, 0),
datetime(1997, 12, 31, 3, 0)])
def testHourlyByYearDayNeg(self):
self.assertEqual(list(rrule(HOURLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 0, 0),
datetime(1997, 12, 31, 1, 0),
datetime(1997, 12, 31, 2, 0),
datetime(1997, 12, 31, 3, 0)])
def testHourlyByMonthAndYearDay(self):
self.assertEqual(list(rrule(HOURLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 0, 0),
datetime(1998, 4, 10, 1, 0),
datetime(1998, 4, 10, 2, 0),
datetime(1998, 4, 10, 3, 0)])
def testHourlyByMonthAndYearDayNeg(self):
self.assertEqual(list(rrule(HOURLY,
count=4,
bymonth=(4, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 10, 0, 0),
datetime(1998, 4, 10, 1, 0),
datetime(1998, 4, 10, 2, 0),
datetime(1998, 4, 10, 3, 0)])
def testHourlyByWeekNo(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 5, 11, 0, 0),
datetime(1998, 5, 11, 1, 0),
datetime(1998, 5, 11, 2, 0)])
def testHourlyByWeekNoAndWeekDay(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 29, 0, 0),
datetime(1997, 12, 29, 1, 0),
datetime(1997, 12, 29, 2, 0)])
def testHourlyByWeekNoAndWeekDayLarge(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 0, 0),
datetime(1997, 12, 28, 1, 0),
datetime(1997, 12, 28, 2, 0)])
def testHourlyByWeekNoAndWeekDayLast(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 0, 0),
datetime(1997, 12, 28, 1, 0),
datetime(1997, 12, 28, 2, 0)])
def testHourlyByWeekNoAndWeekDay53(self):
self.assertEqual(list(rrule(HOURLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 12, 28, 0, 0),
datetime(1998, 12, 28, 1, 0),
| |
'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('c_1', 'v_4'), ('t_3', 'v_8'), ('c_1', 't_2'),
('t_3', 'v_6'), ('v_9', 't_2'), ('v_7', 't_2'), ('v_8', 't_3'), ('v_5', 't_3'), ('v_6', 't_3'),
('t_2', 'v_7'), ('c_1', 't_3'), ('t_2', 'v_9'), ('v_4', 'c_1')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('c_1', 'v_4'), ('t_3', 'v_7'), ('v_8', 't_2'),
('t_2', 'v_8'), ('c_1', 't_2'), ('t_3', 'v_6'), ('v_9', 't_2'), ('v_5', 't_3'), ('v_6', 't_3'),
('v_7', 't_3'), ('c_1', 't_3'), ('t_2', 'v_9'), ('v_4', 'c_1')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'),
('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_6'), ('v_4', 't_2'), ('v_6', 't_2'), ('v_7', 't_2'),
('v_8', 't_3'), ('v_9', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_8', 't_2'),
('t_3', 'v_9'), ('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_8'), ('t_2', 'v_6'), ('v_6', 't_2'),
('v_7', 't_3'), ('v_9', 't_3'), ('v_4', 't_2'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_8'),
('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_6'), ('v_6', 't_2'), ('v_7', 't_3'), ('v_9', 't_2'),
('v_8', 't_3'), ('v_4', 't_2'), ('c_1', 't_3'), ('t_2', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_8'),
('t_3', 'v_9'), ('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_6'), ('v_6', 't_2'), ('v_7', 't_3'),
('v_8', 't_3'), ('v_9', 't_3'), ('v_4', 't_2'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('v_8', 't_2'), ('t_3', 'v_9'),
('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_8'), ('v_4', 't_2'), ('t_3', 'v_6'), ('v_7', 't_2'),
('v_9', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_2', 'v_4'),
('c_1', 't_2'), ('v_4', 't_2'), ('t_3', 'v_6'), ('v_9', 't_2'), ('v_7', 't_2'), ('v_8', 't_3'),
('t_2', 'v_7'), ('c_1', 't_3'), ('t_2', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'),
('t_2', 'v_4'), ('c_1', 't_2'), ('v_4', 't_2'), ('t_3', 'v_6'), ('v_7', 't_2'), ('v_8', 't_3'),
('v_9', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_8', 't_2'),
('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_8'), ('t_3', 'v_6'), ('v_7', 't_3'), ('v_9', 't_2'),
('v_4', 't_2'), ('c_1', 't_3'), ('t_2', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_8', 't_2'),
('t_3', 'v_9'), ('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_8'), ('t_3', 'v_6'), ('v_7', 't_3'),
('v_9', 't_3'), ('v_4', 't_2'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_8'),
('t_2', 'v_4'), ('c_1', 't_2'), ('t_3', 'v_6'), ('v_7', 't_3'), ('v_9', 't_2'), ('v_8', 't_3'),
('v_4', 't_2'), ('c_1', 't_3'), ('t_2', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('v_8', 't_2'), ('t_3', 'v_4'),
('t_3', 'v_9'), ('t_2', 'v_8'), ('c_1', 't_2'), ('t_2', 'v_6'), ('v_6', 't_2'), ('v_4', 't_3'),
('v_7', 't_2'), ('v_9', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('c_1', 'v_5'), ('t_2', 'c_1'), ('v_5', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_4'), ('t_3', 'v_8'),
('c_1', 't_2'), ('t_2', | |
<filename>lsdviztools/lsdplottingtools/lsdmap_basicmaps.py<gh_stars>1-10
#=============================================================================
#=============================================================================
# These functions create figures for Basic visualization
#
#
# It creates separate plots for each basin in the DEM.
#
# Authors:
# <NAME>
# <NAME>
#=============================================================================
#=============================================================================
# IMPORT MODULES
#=============================================================================
# set backend to run on server
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.ticker as ticker
import pandas as pd
from matplotlib import colors
import math
import os
import subprocess
#from shapely.geometry import Polygon
from lsdmapfigure import plottinghelpers as Helper
from lsdmapfigure.plottingraster import MapFigure
from lsdmapfigure.plottingraster import BaseRaster
def PlotTopoRaster(DataDirectory, fname_prefix, size_format='ESURF', FigFormat='png', colors = "terrain"):
    """
    Creates a basic topographic raster plot, draped with the hillshade.
    Needs the hillshade raster (<fname_prefix>_hs.bil) to exist.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        size_format (str): Can be "big" (16 inches wide), "geomorphology" (6.25 inches wide), or "ESURF" (4.92 inches wide) (default esurf).
        FigFormat (str): The format of the figure. Usually 'png' or 'pdf'. If "show" then it calls the matplotlib show() command.
        colors (str): matplotlib colourmap name used for the topography drape.

    Returns:
        Saves a topographic plot (with a semi-transparent hillshade drape on
        top) to DataDirectory/raster_plots/<fname_prefix>_Topo.<FigFormat>.

    Author: BG, FJC
    """
    # check if a directory exists for the raster plots. If not then make it.
    raster_directory = DataDirectory + 'raster_plots/'
    if not os.path.isdir(raster_directory):
        os.makedirs(raster_directory)

    # Set up fonts for plots
    label_size = 10
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Liberation Sans']
    rcParams['font.size'] = label_size

    # set figure sizes (in inches) based on format
    if size_format == "geomorphology":
        fig_width_inches = 6.25
    elif size_format == "big":
        fig_width_inches = 16
    else:
        fig_width_inches = 4.92126

    # the rasters need to have bil extensions.
    print("I'm going to make a basic topographic plot")
    raster_ext = '.bil'

    # Prefer the preprocessed ("_PP") version of the DEM if one exists
    if os.path.isfile(DataDirectory + fname_prefix + "_PP.bil"):
        BackgroundRasterName = fname_prefix + "_PP" + raster_ext
    else:
        BackgroundRasterName = fname_prefix + raster_ext
    HillshadeName = fname_prefix + '_hs' + raster_ext

    # create the map figure
    MF = MapFigure(BackgroundRasterName, DataDirectory, coord_type="UTM_km", colourbar_location='None')

    # First plot the terrain raster with the requested colourmap...
    MF.add_drape_image(BackgroundRasterName, DataDirectory,
                       colourmap = colors,
                       alpha=1,                  # fully opaque base layer
                       show_colourbar = True,
                       colorbarlabel = "None",
                       NFF_opti = True)
    # ...then drape a semi-transparent hillshade on top for relief shading
    MF.add_drape_image(HillshadeName, DataDirectory,
                       colourmap = "gray",
                       alpha=0.4,                # let the topography colours show through
                       show_colourbar = True,
                       colorbarlabel = "None",
                       NFF_opti = True)

    # Save the figure
    ImageName = raster_directory + fname_prefix + '_Topo.' + FigFormat
    MF.save_fig(fig_width_inches = fig_width_inches, FigFileName = ImageName, FigFormat=FigFormat, Fig_dpi = 300)
def PlotSlopeRaster(DataDirectory, fname_prefix, size_format='ESURF', FigFormat='png'):
    """
    Creates a basic slope map with a [0,2] colour scale.
    Needs the slope raster (<fname_prefix>_slope.bil) to exist.

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        size_format (str): Can be "big" (16 inches wide), "geomorphology" (6.25 inches wide), or "ESURF" (4.92 inches wide) (default esurf).
        FigFormat (str): The format of the figure. Usually 'png' or 'pdf'. If "show" then it calls the matplotlib show() command.

    Returns:
        Saves a slope map to DataDirectory/raster_plots/<fname_prefix>_Slopo.<FigFormat>.

    Author: BG, FJC
    """
    # check if a directory exists for the raster plots. If not then make it.
    raster_directory = DataDirectory + 'raster_plots/'
    if not os.path.isdir(raster_directory):
        os.makedirs(raster_directory)

    # Set up fonts for plots
    label_size = 10
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Liberation Sans']
    rcParams['font.size'] = label_size

    # set figure sizes (in inches) based on format
    if size_format == "geomorphology":
        fig_width_inches = 6.25
    elif size_format == "big":
        fig_width_inches = 16
    else:
        fig_width_inches = 4.92126

    # the rasters need to have bil extensions.
    print("I'm going to make a basic slope plot")
    raster_ext = '.bil'
    BackgroundRasterName = fname_prefix + "_slope" + raster_ext

    # create the map figure
    MF = MapFigure(BackgroundRasterName, DataDirectory, coord_type="UTM_km", colourbar_location='None')

    # First plot the slope raster as a dark background (the extreme
    # colour_min_max squashes everything to the bottom of the gray ramp)...
    MF.add_drape_image(BackgroundRasterName, DataDirectory,
                       colourmap = "gray",
                       alpha=1,
                       show_colourbar = True,
                       colorbarlabel = "None",
                       colour_min_max = [0,100000],
                       custom_min_max = [0,0.1],
                       NFF_opti = True)
    # ...then drape the slope raster itself, clipped to a [0,2] scale
    MF.add_drape_image(BackgroundRasterName, DataDirectory,
                       colourmap = "viridis",
                       alpha=1,
                       show_colourbar = True,
                       colour_min_max = [0,2],
                       colorbarlabel = "None",
                       NFF_opti = True)

    # Save the figure.
    # NOTE(review): '_Slopo' looks like a typo for '_Slope', but it is kept
    # so existing workflows that expect this filename keep working.
    ImageName = raster_directory + fname_prefix + '_Slopo.' + FigFormat
    MF.save_fig(fig_width_inches = fig_width_inches, FigFileName = ImageName, FigFormat=FigFormat, Fig_dpi = 300)
def PlotCurveRaster(DataDirectory, fname_prefix, size_format='ESURF', FigFormat='png'):
"""
Creates a basic Slope Map with a [0,2] scale
Args:
DataDirectory (str): the data directory with the m/n csv files
fname_prefix (str): The prefix for the m/n csv files
size_format (str): Can be "big" (16 inches wide), "geomorphology" (6.25 inches wide), or "ESURF" (4.92 inches wide) (defualt esurf).
FigFormat (str): The format of the figure. Usually 'png' or 'pdf'. If "show" then it calls the matplotlib show() command.
Returns:
Shaded relief plot with the basins coloured by basin ID
Author: BG, FJC
"""
# check if a directory exists for the chi plots. If not then make it.
raster_directory = DataDirectory+'raster_plots/'
if not os.path.isdir(raster_directory):
os.makedirs(raster_directory)
# Set up fonts for plots
label_size = 10
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Liberation Sans']
rcParams['font.size'] = label_size
# set figure sizes based on format
if size_format == "geomorphology":
fig_width_inches = 6.25
elif size_format == "big":
fig_width_inches = 16
else:
fig_width_inches = 4.92126
# going to make the basin plots - need to have bil extensions.
print("I'm going to make a cabic topographic plot")
# get the rasters
raster_ext = '.bil'
## Just checking if you have a PP version of it
BackgroundRasterName = fname_prefix+"_curvature"+raster_ext
# create the map figure
MF = MapFigure(BackgroundRasterName, DataDirectory,coord_type="UTM_km", colourbar_location='None')
# Drape the hillshade and add the color
## Frist plot the black background
MF.add_drape_image(BackgroundRasterName,DataDirectory, # Calling the function will add a drapped raster on the top of the background on
colourmap = "gray", # colormap used for this raster, see http://matplotlib.org/users/colormaps.html for examples, put _r at the end of a colormap to get the reversed version
alpha=1, # transparency of this specific layer, 0 for fully transparent (why not) and 1 for fully opaque
show_colourbar = True, # Well, this one is explicit I think
colorbarlabel = "None",
colour_min_max = [0,100000],
custom_min_max = [0,0.1],
NFF_opti = True)
## Drape the slope raster
MF.add_drape_image(BackgroundRasterName,DataDirectory, # Calling the function will add a drapped raster on the top of the background on
colourmap = "viridis", # colormap used for this raster, see http://matplotlib.org/users/colormaps.html for examples, put _r at the end of a colormap to get the reversed version
alpha=1, # transparency of this specific layer, 0 for fully transparent (why not) and 1 for fully opaque
show_colourbar = True, # Well, this one is explicit I | |
<reponame>GambitBSM/gambit_2.0
# GUM: GAMBIT Universal Model Machine
# ***********************************
# \file
#
# Contains all routines for parsing input .gum files and
# the SARAH/FeynRules model files.
#
# *************************************
#
# \author <NAME>
# (<EMAIL>)
# \date 2018, 2019, 2020
#
# \author <NAME>
# (<EMAIL>)
# \date 2018, 2019
#
# \author <NAME>
# (<EMAIL>)
# \date 2020 Jan
#
# **************************************
# TODO add mass dimension for new parameters in .gum file
import yaml
import re
from distutils.dir_util import copy_tree
from collections import defaultdict
from .setup import *
from .cmake_variables import *
"""
.GUM FILE PARSING
"""
class Inputs:
    """
    All the inputs from the .GUM file. Returns the master
    "gum" object used internally.

    Parameters:
        model_name (str): name of the new GAMBIT model; hyphens are
            replaced by underscores (GAMBIT identifiers cannot contain '-').
        base_model (str): the model the new model is built on top of.
        mathpackage (str): 'sarah' or 'feynrules'.
        wimp_candidate: PDG code of the (stable) WIMP DM candidate.
        invisibles: PDG codes of invisible final-state particles.
        decaying_dm: PDG code of a decaying DM candidate, or False (default)
            if DM is stable; takes precedence over wimp_candidate.
        mathname (str): model name inside the Mathematica package, if it
            differs from the GAMBIT model name (defaults to model_name).
        lagrangian: the total Lagrangian, if specified.
        restriction (str): restriction file to apply (FeynRules), '' if none.
    """

    def __init__(self, model_name, base_model, mathpackage,
                 wimp_candidate, invisibles, decaying_dm = False,
                 mathname = None, lagrangian = None, restriction = None):
        # GAMBIT model names cannot contain hyphens.
        self.name = model_name.replace('-','_')
        self.base_model = base_model
        # Set the DM PDG code from either the decaying DM or WIMP candidate
        if decaying_dm:
            self.dm_pdg = decaying_dm
            self.dm_decays = True
        else:
            self.dm_pdg = wimp_candidate
            self.dm_decays = False
        self.invisibles_pdg = invisibles
        self.math = mathpackage
        self.LTot = lagrangian
        self.spec = "{0}_spectrum".format(self.name)
        # If we want the new GAMBIT model to have a different
        # name than the model file from Mathematica
        self.mathname = mathname if mathname else model_name
        self.restriction = restriction if restriction else ''
class Outputs:
    """
    Outputs for GUM to write.

    Records which backend outputs GUM should generate, plus any
    per-backend options. Raises GumError when Vevacious is requested
    without SPheno.
    """
    def __init__(self, mathpackage, calchep = False, pythia = False,
                 micromegas = False, spheno = False,
                 vevacious = False, ufo = False, options = None):
        self.ch = calchep
        self.pythia = pythia
        self.mo = micromegas
        self.spheno = spheno
        self.vev = vevacious
        self.ufo = ufo
        # None default avoids the shared-mutable-default pitfall: every
        # instance gets its own options dict.
        self.options = {} if options is None else options
        # Overwrite these, as the output does not exist.
        if mathpackage == 'feynrules':
            self.spheno = False
            self.vev = False
        # If Pythia is set then we also have UFO files, of course
        if pythia: self.ufo = True
        # If vevacious is needed, we have to have SPheno too...
        if vevacious and not spheno:
            raise GumError(("\n\nCurrently, gum needs SPheno output to be able "
                            "to produce Vevacious output.\nPlease change "
                            "your .gum file (and SARAH files, if necessary)."))
    def bes(self):
        """Return the enabled backend names, in a fixed canonical order."""
        flags = [('calchep', self.ch), ('pythia', self.pythia),
                 ('micromegas', self.mo), ('spheno', self.spheno),
                 ('vevacious', self.vev), ('ufo', self.ufo)]
        return [name for name, enabled in flags if enabled]
def check_gum_file(inputfile):
    """
    Checks the input .GUM file for all necessary inputs.

    Args:
        inputfile (str): path to the candidate .gum file.

    Returns:
        dict: the parsed YAML contents of the file.

    Raises:
        GumError: wrong file extension, unparseable YAML, or a missing
            mandatory node ('math', 'math.package', 'math.model',
            'output').
    """
    print("Attempting to parse {0}...".format(inputfile))
    if not inputfile.endswith(".gum"):
        if inputfile.endswith(".mug"):
            raise GumError(("\n\nGUM called with a .mug file in normal mode --"
                            " you probably want to call gum with the -r flag:"
                            "\n\n ./gum -r " + inputfile + "\n"))
        else:
            raise GumError("\n\nInput filetype must be .gum.")
    with open(inputfile, "r") as f:
        try:
            data = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            # The original only printed the error, leaving 'data' unbound
            # and causing a confusing NameError below. Fail loudly instead.
            raise GumError("\n\nCould not parse the .gum file:"
                           "\n{0}".format(exc))
    if 'math' not in data:
        raise GumError(("\n\n'math' node needed in .gum file."))
    if 'package' not in data['math']:
        # Don't know what to run...!
        raise GumError(("\n\nNo mathpackage input - what do you expect "
                        "GUM to do? Please check your .gum file. "
                        "Supported entries: sarah, feynrules."))
    if data['math']['package'] not in ["sarah", "feynrules"]:
        raise GumError(("\n\nYou must specify which mathpackage you want "
                        "GUM to use. Please check your .gum file. "
                        "Supported entries: sarah, feynrules."))
    if 'model' not in data['math']:
        raise GumError(("\n\nNo model file specified. "
                        "Please check your .gum file."))
    if 'output' not in data:
        # Don't know what to generate!
        raise GumError(("\n\nNo output specified! You need to tell GUM "
                        "what it is you'd like it to do!\n"
                        "Please change your .gum file!"))
    print("All required YAML nodes present...")
    return data
def fill_gum_object(data):
    """
    Returns a model of type Inputs for GUM to work with. 'data' is the
    parsed data from check_gum_file.

    Args:
        data (dict): parsed .gum YAML, already validated for mandatory
            nodes by check_gum_file.

    Returns:
        tuple: (Inputs instance describing the model, Outputs instance
        describing the backends to generate).

    Raises:
        GumError: invalid/missing Lagrangian (FeynRules), all backends
            disabled, options given for an unknown output, both WIMP and
            decaying DM candidates specified, or MicrOMEGAs requested
            without any DM candidate.
    """
    math = data['math']
    mathpackage = math['package']
    lagrangian = ""
    if mathpackage == "feynrules":
        if 'lagrangian' in data['math']:
            # Check the Lagrangian makes sense (i.e. is all alphanumeric)
            lagrangian = data['math']['lagrangian']
            # Each '+'-separated term must be a single alphanumeric symbol.
            L = lagrangian.split('+')
            for l in L:
                if not l.strip(' ').isalnum():
                    raise GumError(("Non-alphanumeric character detected in "
                                    " the Lagrangian. Please check your .gum "
                                    "file."))
        else:
            raise GumError(("\n\nYou must specify the Lagrangian for your "
                            "model!\n This can be either a single entry like "
                            "'LTotal', or a sum of strings, like 'LSM + LDM'. "
                            "Please amend your .gum file."))
    gambit_model = math['model']
    # Overwrite the GAMBIT model if specified
    mathname = ""
    if 'gambit_opts' in data:
        if 'model_name' in data['gambit_opts']:
            # Keep the Mathematica-side name; GAMBIT gets the override.
            mathname = gambit_model
            gambit_model = data['gambit_opts']['model_name']
    # FeynRules specific -- a "base" model to build a pheno model on top of.
    # Typically this is the SM, plus the BSM contribution defined in a
    # separate file.
    if 'base_model' in data['math']:
        base_model = data['math']['base_model']
    else:
        base_model = ""
    if 'wimp_candidate' in data:
        wimp_candidate = data['wimp_candidate']
    else:
        wimp_candidate = None
    if 'invisibles' in data:
        invisibles = data['invisibles']
    else:
        invisibles = []
    backends = ['calchep', 'pythia', 'spheno', 'ufo',
                'micromegas', 'vevacious']
    opts = {}
    # The outputs GUM should hook up to GAMBIT, if specified.
    # Every known backend ends up in opts, defaulting to False.
    if 'output' in data:
        for i in backends:
            if i in data['output']:
                opts[i] = data['output'][i]
            else:
                opts[i] = False
    if all(value == False for value in list(opts.values())):
        raise GumError(("\n\nAll backend output set to false in your .gum "
                        "file.\nGive GUM something to do!\n"
                        "Please change your .gum file."))
    options = {}
    # Options for the outputs declared
    if 'output_options' in data:
        for output in data['output_options']:
            # NOTE: since opts contains all known backends, this only
            # rejects unknown backend names; options for a backend set
            # to false still pass through.
            if output not in opts.keys():
                raise GumError(("\n\nOptions given to output " + output + " "
                                "which is not declared as gum output.\n"
                                "Please change your .gum file."))
            options[output] = data['output_options'][output]
    # If we've got this far, we'll also force some decays to be written,
    # either by SPheno or by CalcHEP.
    # N.B. vevacious is conditional on SPheno
    # This now means if any of: Pythia(ufo) or MicrOMEGAs are requested,
    # then we'll default to activating CalcHEP (unless SPheno requested)
    set_calchep = True
    if mathpackage == 'sarah' and opts['spheno'] == True:
        set_calchep = False
    if set_calchep:
        opts['calchep'] = True
    outputs = Outputs(mathpackage, options=options, **opts)
    # See if we're told DM is a decaying particle or not...
    if 'decaying_dm_candidate' in data:
        decaying_dm = data['decaying_dm_candidate']
    else:
        decaying_dm = None
    # If decaying DM + WIMP candidate -> throw error
    if decaying_dm and wimp_candidate:
        raise GumError(("\n\nYou have specified both a WIMP candidate and "
                        "a decaying DM candidate.\nGUM can only handle one "
                        "of these at present. Please amend your .gum file.\n"))
    # If the user wants MicrOMEGAs output but hasn't specified a DM candidate
    if not (wimp_candidate or decaying_dm) and outputs.mo:
        raise GumError(("\n\nYou have asked for MicrOMEGAs output but have not "
                        "specified which particle is meant to be the DM "
                        "candidate! Please add an entry to your .gum file "
                        "like:\n\nwimp_candidate: 9900001 "
                        "# <--- Desired PDG code here.\n"))
    # FeynRules restriction files
    restriction = None
    if 'restriction' in math and mathpackage == 'feynrules':
        restriction = math['restriction']
    gum_info = Inputs(gambit_model, base_model, mathpackage,
                      wimp_candidate, invisibles, decaying_dm,
                      mathname, lagrangian, restriction)
    print("Parse successful.")
    return gum_info, outputs
"""
FEYNRULES PARSING
"""
def parse_feynrules_model_file(model_name, base_model, outputs):
"""
Parses a FeynRules model file. Checks for the following:
- Every parameter has an LH block and an index
- No particles have an external name with underscores etc.
- Every parameter has an interaction order specified *if* the user
requests UFO output
- ComplexParameters and CalcHEP output
TODO check base_model too
"""
# Figure out the path pointing to the FeynRules file
# First - check for it in the FeynRules directory
fr_file_path = FEYNRULES_PATH + ("/Models/{0}/{0}.fr").format(model_name)
# If it doesn't exist, try the GUM models folder
if not os.path.isfile(fr_file_path):
fr_file_path = GUM_DIR + ("/Models/{0}/{0}.fr").format(model_name)
if not os.path.isfile(fr_file_path):
raise GumError(("GUM Error: Unable to find the model {0} in either "
"the FeynRules model directory, or the GUM model "
"directory!\nPlease move it to one of "
"these locations.").format(model_name))
payattn = False
# Read the input in
with open(fr_file_path, 'r') as f:
lines = f.readlines()
# Flatten the string
contents = "".join(lines).replace("\n","")
# Parse the contents
# | |
"""Actions to define on the Input parameters.
This module defines some of the most common actions that a BigDFT user might
like to perform on the input file. Such module therefore sets some of the keys
of the input dictionary to the values needed to perform the operations.
Users might also take inspiration from the actions performed in order to customize the
runs in a different way. All the functions of this module have as first
argument ``inp``, the dictionary of the input parameters.
Many other actions are available in BigDFT code. This module only regroups the
most common. Any of these functionalities might be removed from the input file
by the :py:func:`remove` function.
Note:
Any of the action of this module, including the :py:func:`remove` function,
can be also applied to an instance of the
:py:class:`BigDFT.Inputfiles.Inputfile` class, by removing the first
argument (``inp``). This adds extra flexibility as the same method may be
used to a dictionary instance or to a BigDFT input files.
See the example :ref:`input_action_example`.
Note:
Each of the actions here **must** have default value for the arguments
(except the input dictionary ``inp``). This is needed for a good behaviour
of the function `remove`.
.. autosummary::
remove
set_xc
set_hgrid
set_rmult
set_wavefunction_convergence
set_atomic_positions
set_mesh_sizes
optimize_geometry
spin_polarize
charge
charge_and_polarize
set_symmetry
apply_electric_field
set_random_inputguess
write_orbitals_on_disk
read_orbitals_from_disk
write_density_on_disk
calculate_dipole
use_gpu_acceleration
change_data_directory
connect_run_data
add_empty_SCF_orbitals
extract_virtual_states
set_electronic_temperature
calculate_tddft_coupling_matrix
write_support_function_matrices
"""
from futile.Utils import dict_set
__set__ = dict_set
"""func: Action function.
This is the pointer to the set function, useful to modify the action with the
undo method
"""
def __undo__(inp, *subfields):
    """
    Eliminate the last item of the subfields as provided to dict_set.

    Pops the deepest key of the path ``subfields`` from ``inp`` and keeps
    removing enclosing keys for as long as the removal leaves an empty
    dictionary behind, so no hollow nesting levels remain.

    Args:
        inp (dict): dictionary to prune.
        subfields: the same key path that was handed to ``dict_set``.
    """
    from futile.Utils import push_path
    # remove the last key until the parent is empty
    lastkey = -1
    tmp = {}
    # NOTE(review): push_path presumably returns the container at
    # subfields[:lastkey] together with the key to pop from it -- confirm
    # against futile.Utils. 'tmp == {}' after the pop means that container
    # became empty, so the loop retries one level higher (shorter path).
    while len(subfields) > -lastkey and tmp == {}:
        keys = subfields[:lastkey]
        tmp, k = push_path(inp, *keys)
        tmp.pop(k)
        lastkey -= 1
def remove(inp, action):
    """Remove action from the input dictionary.

    Remove an action from the input file, thereby restoring the **default**
    value, as if the action were not specified.

    Args:
       inp (dict): dictionary to remove the action from.
       action (func): one of the actions of this module. It does not need
          to be specified before, in which case it produces no effect.

    Example:
       >>> from Calculators import SystemCalculator as C
       >>> code=C()
       >>> inp={}
       >>> set_xc(inp,'PBE')
       >>> write_orbitals_on_disk(inp)
       >>> log=code.run(input=inp) # perform calculations
       >>> remove(write_orbitals_on_disk) #remove the action
       >>> read_orbitals_from_disk(inp)
       >>> # this will restart the SCF from the previous orbitals
       >>> log2=code.run(input=inp)
    """
    global __set__
    # Temporarily swap the module-level setter for the undo routine so
    # that re-running the action removes the keys it would have set.
    __set__ = __undo__
    try:
        action(inp)
    finally:
        # Restore the real setter even if the action raises; otherwise
        # every later action would silently undo keys instead of setting
        # them.
        __set__ = dict_set
def set_hgrid(inp, hgrids=0.4):
    """
    Set the wavelet grid spacing.

    Args:
       hgrids (float,list): list of the grid spacings in the three
          directions. It might also be a scalar, which implies the same
          spacing in each direction.
    """
    __set__(inp, 'dft', 'hgrids', hgrids)
def set_wavefunction_convergence(inp, gnrm=1.0e-04):
    """
    Set the tolerance acceptance level for stopping the self-consistent
    iterations.

    Args:
       gnrm (float): the tolerance level.
    """
    __set__(inp, 'dft', 'gnrm_cv', gnrm)
def set_rmult(inp, rmult=None, coarse=5.0, fine=8.0):
    """
    Set the wavelet grid extension by modifying the multiplicative radii.

    Args:
       rmult (float,list): the coarse and fine resolution multipliers;
          may also be a scalar. When omitted, ``coarse`` and ``fine``
          are used instead.
       coarse (float): coarse radius multiplier, used only when ``rmult``
          is not provided.
       fine (float): fine radius multiplier, used only when ``rmult``
          is not provided.
    """
    if rmult is None:
        radii = [coarse, fine]
    else:
        radii = rmult
    __set__(inp, 'dft', 'rmult', radii)
def set_symmetry(inp, yes=True):
    """
    Set the symmetry detection for the charge density and the ionic
    forces and stress.

    Args:
       yes (bool): if ``False`` the symmetry detection is disabled.
    """
    # The input key is the negation: 'disablesym'.
    disable = not yes
    __set__(inp, 'dft', 'disablesym', disable)
def set_linear_scaling(inp):
    """
    Activates the linear scaling mode.

    If a previous action stored ``inputpsiid == 2`` in the 'dft' section
    (restart from disk), switch to 102 instead of the plain 'linear'
    input guess -- presumably its linear-scaling counterpart (TODO
    confirm against the BigDFT input documentation).
    """
    newid = 'linear'
    # Bug fix: the original queried inp.get('inputpsiid', 'False') at the
    # *top* level (guarded by the always-truthy default string 'False'),
    # whereas this module writes inputpsiid inside the 'dft' section, so
    # a previous value of 2 could never be detected there.
    previous_ipid = inp.get('dft', {}).get('inputpsiid')
    if previous_ipid == 2:
        newid = 102
    __set__(inp, 'dft', 'inputpsiid', newid)
def set_mesh_sizes(inp, ngrids=64):
    """
    Constrain the number of grid points in each direction.

    This is useful when performing periodic system calculations with
    variable cells which need to be compared with each other. In this way
    the number of degrees of freedom is kept constant throughout the
    various simulations.

    Args:
       ngrids (int,list): list of the number of mesh points in each
          direction. Might be a scalar.
    """
    __set__(inp, 'dft', 'ngrids', ngrids)
def spin_polarize(inp, mpol=1):
    """
    Add a collinear spin polarization to the system.

    Args:
       mpol (int): spin polarization in Bohr magneton units.
    """
    # Two spin channels, plus the requested net polarization.
    for key, value in (('nspin', 2), ('mpol', mpol)):
        __set__(inp, 'dft', key, value)
def charge(inp, charge=-1):
    """
    Charge the system.

    Arguments:
       charge (int,float): value of the charge in units of *e* (the
          electron has charge -1). Also accept floating point numbers.
    """
    __set__(inp, 'dft', 'qcharge', charge)
def apply_electric_field(inp, elecfield=None):
    """
    Apply an external electric field on the system.

    Args:
       elecfield (list, float): values of the Electric Field in the three
          directions. Might also be a scalar. Defaults to
          ``[0, 0, 1.e-3]``.
    """
    # None default replaces the original mutable list default, which was
    # shared between all calls of the function.
    if elecfield is None:
        elecfield = [0, 0, 1.e-3]
    # Store a copy so the caller's list cannot alias the input dict; a
    # scalar (allowed by the docstring, but which broke the original list
    # comprehension) is passed through unchanged.
    try:
        field = [e for e in elecfield]
    except TypeError:
        field = elecfield
    __set__(inp, 'dft', 'elecfield', field)
def charge_and_polarize(inp):
    """
    Charge the system by removing one electron. Assume that the original
    system is closed shell, thus polarize.
    """
    # Removing one electron leaves one unpaired spin: qcharge=+1, mpol=1.
    # The two actions write disjoint keys, so their order is irrelevant.
    spin_polarize(inp, mpol=1)
    charge(inp, charge=1)
def set_SCF_method(inp, method='dirmin', mixing_on='density',
                   mixing_scheme='Pulay'):
    """
    Set the algorithm for SCF.

    Args:
       method (str): The algorithm chosen. Might be different for the
          cubic (CS) or linear scaling (LS) algorithm.

          * dirmin: Direct minimization approach (valid both to LS and CS)
          * mixing: Mixing scheme (only CS)
          * foe: Fermi Operator Expansion (only LS)
          * pexsi: Pole EXpansion and Selected Inversion method (only LS,
            require PEXSI compilation)
          * diag: Explicit diagonalization (only LS, for validation
            purposes)

       mixing_on (str): May be ``"density"`` or ``"potential"`` in the
          ``"mixing"`` case, decide to which quantity the mixing to be
          performed.
       mixing_scheme (str): May be:

          * Pulay : DIIS mixing on the last 7 iterations
          * Simple: Simple mixing
          * Anderson: Anderson scheme
          * Anderson2: Anderson scheme based on the two previous iterations
          * CG: Conjugate Gradient based on the minimum of the energy with
            respect of the potential

    Warning:
       Only the FOE method exhibit asymptotic linear scaling regime.

    Todo:
       Check if the linear scaling case needs another input variable for
       the mixing of the potential (density)
    """
    # Bug fix: the original called method.upper() and discarded the
    # result, so the 'MIXING'/'DIRMIN' comparisons below never matched
    # the documented lowercase inputs.
    meth = method.upper()
    if meth != 'MIXING':
        __set__(inp, 'lin_kernel', 'linear_method', method)
    if meth == 'DIRMIN':
        __set__(inp, 'mix', 'iscf', 0)
        return
    # Build the iscf code: +10 selects mixing on the density, the units
    # digit encodes the mixing scheme.
    iscf = 0
    if mixing_on == 'density':
        iscf += 10
    scheme_codes = {'Pulay': 7, 'Anderson': 3, 'Anderson2': 4,
                    'Simple': 2, 'CG': 5}
    iscf += scheme_codes.get(mixing_scheme, 0)
    __set__(inp, 'mix', 'iscf', iscf)
def add_empty_SCF_orbitals(inp, norbs=10):
    """
    Insert ``norbs`` empty orbitals in the SCF procedure.

    Args:
       norbs (int): number of empty orbitals.

    Warning:
       In linear scaling case, this is only meaningful for the direct
       minimization approach.
    """
    # The cubic- and linear-scaling codes read different input keys for
    # the same request, so set both.
    __set__(inp, 'lin_general', 'extra_states', norbs)
    __set__(inp, 'mix', 'norbsempty', norbs)
def write_cubefiles_around_fermi_level(inp, nplot=1):
    """
    Writes the ``nplot`` orbitals around the fermi level in cube format.

    Args:
       nplot (int): the number of orbitals to print around the fermi
          level.

    Warning:
       This is presently meaningful only for an empty states calculation.

    Warning:
       This would work only for the cubic scaling code at present.
    """
    __set__(inp, 'dft', 'nplot', nplot)
def write_orbitals_on_disk(inp, format='binary'):
    """
    Set the code to write the orbitals on disk in the provided format.

    Args:
       format (str): the format to write the orbitals with. Accepted
          values:

          * 'binary'
          * 'text'
          * 'etsf' (requires etsf-io enabled)

    Todo:
       Verify if this option works for a linear scaling calculation.
    """
    # The format string is forwarded verbatim to the 'output' section.
    __set__(inp, 'output', 'orbitals', format)
def write_support_functions_on_disk(inp, format='binary', matrices=True,
                                    coefficients=False):
    # NOTE(review): unimplemented placeholder -- intentionally a no-op;
    # the arguments are currently ignored.
    pass
def write_support_function_matrices(inp, format='text'):
    """
    Write the matrices of the linear scaling formats.

    Args:
       format (str): the format to write the matrices with. Accepts the
          strings:

          * 'binary'
          * 'text'

    Todo:
       Verify if the binary format is available and set the appropriate
       values.
    """
    # output_mat codes: 1 = text, 4 = binary, 0 = do not write.
    codes = {'text': 1, 'binary': 4}
    __set__(inp, 'lin_general', 'output_mat', codes.get(format, 0))
def set_atomic_positions(inp, posinp=None):
"""
Insert the atomic positions as a part of the input dictionary
"""
__set__(inp, | |
<filename>archived/Project2.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat May 1 12:52:41 2021
@author: JP
"""
import os
import pandas as pd
import requests
from windpowerlib import ModelChain, WindTurbine, get_turbine_types
from geopy.distance import geodesic
import geocoder
# Forecast packages
import datetime
from timezonefinderL import TimezoneFinder as TZFind
from meteostat import Point, Hourly
def get_weather_data(filename='weather.csv', **kwargs):
    r"""
    Imports weather data from a file.

    The data include wind speed at two different heights in m/s, air
    temperature in two different heights in K, surface roughness length in m
    and air pressure in Pa. The height in m for which the data applies is
    specified in the second row.

    In case no weather data file exists, an example weather data file is
    automatically downloaded and stored in the same directory as this example.

    Parameters
    ----------
    filename : str
        Filename of the weather data file. Default: 'weather.csv'.

    Other Parameters
    ----------------
    datapath : str, optional
        Path where the weather data file is stored.
        Default is the same directory this example is stored in.

    Returns
    -------
    :pandas:`pandas.DataFrame<frame>`
        DataFrame with time series for wind speed `wind_speed` in m/s,
        temperature `temperature` in K, roughness length `roughness_length`
        in m, and pressure `pressure` in Pa.
        The columns of the DataFrame are a MultiIndex where the first level
        contains the variable name as string (e.g. 'wind_speed') and the
        second level contains the height at which it applies
        (e.g. 10, if it was measured at a height of 10 m). The index is a
        DateTimeIndex.
    """
    if 'datapath' not in kwargs:
        kwargs['datapath'] = os.path.dirname(__file__)
    file = os.path.join(kwargs['datapath'], filename)

    # download example weather data file in case it does not yet exist;
    # a timeout keeps the script from hanging forever on a dead server
    if not os.path.isfile(file):
        req = requests.get("https://osf.io/59bqn/download", timeout=30)
        with open(file, "wb") as fout:
            fout.write(req.content)

    # read csv file; parse the index into UTC datetimes explicitly
    # instead of via the `date_parser` argument, which is deprecated and
    # removed in recent pandas versions
    weather_df = pd.read_csv(file, index_col=0, header=[0, 1])
    weather_df.index = pd.to_datetime(weather_df.index, utc=True)

    # change time zone
    weather_df.index = weather_df.index.tz_convert('Europe/Berlin')

    return weather_df
# Parameters for the windpowerlib ModelChain: each entry selects one of
# the sub-models used to turn weather data into turbine power output.
modelchain_data = {
    'wind_speed_model': 'logarithmic',  # 'logarithmic' (default),
                                        # 'hellman' or
                                        # 'interpolation_extrapolation'
    'density_model': 'ideal_gas',  # 'barometric' (default), 'ideal_gas'
                                   # or 'interpolation_extrapolation'
    'temperature_model': 'linear_gradient',  # 'linear_gradient' (def.) or
                                             # 'interpolation_extrapolation'
    'power_output_model':
        'power_curve',  # 'power_curve' (default) or
                        # 'power_coefficient_curve'
    'density_correction': True,  # False (default) or True
    'obstacle_height': 0,  # default: 0
    'hellman_exp': None  # None (default); only used by the 'hellman' model
}
# Dummy variable for the while loop to begin
turbine = 'a'
# Holds the weather DataFrame once the user has downloaded data ('w');
# turbine analysis refuses to run while this is still None.
weather = None
# Program info and instructions for the user
print(29*'*')
print('TURBINE CALCULATOR TURBO 3000')
print(29*'*')
print()
print('Instructions:')
print("To download weather data for a specific location, type 'w'")
print("If you wish to analyze one turbine, type 's'")
print("If you wish to compare two turbines, type 'c'")
print("If you wish to see the turbine database, type 'db'")
print("If you wish to exit, type 'q' now!")
print()
print("NOTE: You must first download weather data before turbine analysis!")
print()
while turbine != 'q':
turbine = input("What do you wish to do? ")
if turbine == 'q':
print("Program exiting...")
break
elif turbine == 'w':
print()
print("WEATHER MODULE")
print(14*'-')
print("Instructions: ")
print("Fill in all the required data for weather history retrieval")
print()
# User inputs:
desired_loc = str(input('Desired location: '))
start_year = input('Start Year: ')
start_month = input('Month (as number 1-12): ')
start_day = input('Day: ')
end_year = input('End Year: ')
end_month = input('Month (as number 1-12): ')
end_day = input('Day: ')
try:
start_year,start_month,start_day = eval(start_year), eval(start_month), eval(start_day)
end_year,end_month,end_day = eval(end_year), eval(end_month), eval(end_day)
except NameError:
print('Invalid input. Please try again')
exit()
# Handling too old data and users from the future that don't know
# the gregorian calendar
# Basic ones first
if start_year < 2000 or start_year > end_year or not isinstance(start_year,int):
start_year = 2000
print('Warning: Fetching the data will likely take a long time')
if (end_year > datetime.datetime.now().year or not isinstance(end_year,int)
or end_year < start_year):
end_year = datetime.datetime.now().year
if not 1<=start_month<=12 or not isinstance(start_month,int):
start_month = 1
if not 1<=end_month<=12 or not isinstance(end_month,int):
end_month = 1
if not 1<=start_day<=31 or not isinstance(start_day,int):
start_day = 1
if not 1<=end_day<=31 or not isinstance(end_day,int):
end_day = 1
# Big brain ones
if start_year == end_year and start_month>=end_month:
start_month = end_month
if start_day > end_day:
start_day = end_day - 1
print('Warning: erroneous dates input. Single day will be obtained')
if end_year == datetime.datetime.now().year:
if end_month > datetime.datetime.now().month:
end_month = datetime.datetime.now().month
if end_month == datetime.datetime.now().month:
if end_day > datetime.datetime.now().day:
end_day = datetime.datetime.now().day
print("Warning: if you'd like to use predicted data",
' please refer to the FutureGainsTurbineGang model')
# Handling location
Location = geocoder.osm(desired_loc)
desired_lat_long = Location.latlng
lat, long = desired_lat_long[0], desired_lat_long[1]
# Determining where user is, to warn if input is very far from them
user_current_loc = geocoder.ip('me')
user_lat_long = user_current_loc.latlng
distance_mi = round(geodesic(user_lat_long,desired_lat_long).miles,2)
# Handling bad user input - location
if isinstance(desired_lat_long,list) is False:
print('Invalid location, please try again!')
continue
distance_mi = 0
if distance_mi > 350:
print('Warning! Your distance to the location is', distance_mi,'miles',
'\n','Consider refining your input using City,State/Province,Country')
YesorNo = input('Would you like to proceed? (Y/N): ')
if YesorNo != ('Y' or 'y'):
print("Weather download cancelled...")
print()
continue
# Handling start year, end year, and standardized timezone
tf = TZFind()
tz = tf.timezone_at(lng=long, lat=lat)
start = datetime.datetime(start_year,start_month,start_day)
end = datetime.datetime(end_year,end_month,end_day)
print()
print("Downloading weather data, please wait...")
# Obtaining weather data
desired_point = Point(lat,long,10) #Finds closest weather station
weather_data = Hourly(desired_point, start, end)
weather_data = weather_data.fetch()
# Manipulating to the format needed
# Weather columns that aren't needed
weather_data = weather_data.drop(['dwpt','rhum','prcp','snow','coco',
'tsun','wdir','wpgt'],axis=1)
weather_data.rename(columns= {'temp':'temperature',
'pres':'pressure',
'wspd':'wind_speed'},inplace=True)
w_data_format = weather_data.reindex(columns = ['pressure','temperature',
'wind_speed'])
# Adding roughness value (obtained from sample weather data)
length = len(w_data_format)
roughness_length_column = [0.15 for i in range(length)]
w_data_format['roughness_length'] = roughness_length_column
# Unit conversions
w_data_format['temperature'] += 273.15 # Kelvin conversion
w_data_format['pressure'] *= 100 # conversion from hPa to Pa
w_data_format['wind_speed'] *= (5/18) # conversion to m/s
# adding heights row
heights_row = []
heights_row.insert(0,{'pressure': 0,'temperature': 2,'wind_speed':10,
'roughness_length':0})
data = pd.concat([pd.DataFrame(heights_row),w_data_format],
ignore_index=False)
# In case of missing data:
data.loc[data["pressure"].isnull(),'pressure'] = 101500
data.loc[data["temperature"].isnull(),'temperature'] = 298
data.loc[data["wind_speed"].isnull(),'wind_speed'] = 0
data.to_csv('Test_file.csv')
print("Done! Writing data to CSV file...")
# Read weather data from csv
weather = get_weather_data(filename='Test_file.csv', datapath='')
print("Done!")
continue
elif turbine == 'db':
df=get_turbine_types(print_out=True)
continue
elif turbine == 's':
df = get_turbine_types(print_out = False)
turb_correct = False
power_gen = []
revenue_list = []
cost_list = []
profit_list = []
cum_revenue = []
cum_profit = []
if weather is None:
print("No weather data generated yet! Please do this first...")
continue
else:
pass
print()
print("SINGLE TURBINE ANALYSIS MODULE")
print(30*"-")
# User input of turbine to analyze. Loop to ensure valid turbine name is entered
while turb_correct == False:
turb_name = input("Enter the name of the turbine you wish to analyze EXACTLY as it appears in the database: ")
if turb_name in df.values:
turb_correct = True
else:
print("This turbine is not in the database! Try again...")
continue
# Parameter input from user. If no value is entered, default values will be used
hub_height = input("Enter the hub height for the turbine: ")
if hub_height == '0' or hub_height == '':
print("WARNING! Default value will be used...")
hub_height = 135
else:
hub_height = int(hub_height)
ccost = input('Enter the construction cost for a single {} turbine: '.format(turb_name))
if ccost == '0' or ccost == '':
print("WARNING! Default value will be used...")
ccost = 10000000
else:
ccost = int(ccost)
mfactor = (input("Enter the annual maintenance cost as a percentage of the construction cost (w/o %): "))
if mfactor == '0' or mfactor == '':
print("WARNING! Default value will be used...")
mfactor = 0.02
else:
mfactor = float(mfactor) / 100
revenue = (input("Enter the expected revenue per kWh: "))
if revenue == '0' or revenue == '':
print("WARNING! Default value will be used...")
revenue = 0.07
else:
revenue = float(revenue)
print()
print()
turbine_data = {'turbine_type': turb_name,
'hub_height': hub_height,
'construction_cost': ccost,
'maint_cost': mfactor,
'revenue': revenue}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.